/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
    Written 1997-2001 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
    It also supports the Symbios Logic version of the same chip core.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    Support and updates available at
    http://www.scyld.com/network/yellowfin.html
    [link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME    "yellowfin"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sep 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE         /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;           /* Constrained by errata */
static int fifo_cfg = 0x0020;               /* Bypass external Tx FIFO. */
#elif defined(YF_NEW)                   /* A future perfect board :->.  */
static int dma_ctrl = 0x00CAC277;           /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;             /* Constrained by errata */
static const int fifo_cfg = 0x0020;             /* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined.  These exist for driver
   interoperability.
*/
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_SIZE   12      /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE    64
#define STATUS_TOTAL_SIZE   TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE       2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE       RX_RING_SIZE*sizeof(struct yellowfin_desc)
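
/* For reference (assuming no struct padding, which holds for the layouts
   below: struct yellowfin_desc is four __le32 words, 16 bytes, and struct
   tx_status_words is four u16 words, 8 bytes), these totals work out to
   2*16*16 = 512 bytes of Tx descriptors, 64*16 = 1024 bytes of Rx
   descriptors and 16*8 = 128 bytes of Tx status. */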

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer.*/

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");

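/* Example (illustrative parameter values only): loading the module with
       modprobe yellowfin debug=2 rx_copybreak=200 full_duplex=1,1
   raises the message verbosity, copies received frames of up to 200 bytes
   into freshly allocated skbuffs, and forces full duplex on the first two
   boards found. */
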
/*
                Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip.  This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
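For instance, yellowfin_init_ring() below links entry i to entry
(i+1) % RX_RING_SIZE through each descriptor's branch_addr field, so the
final descriptor branches back to the start of the list.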

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information).  For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
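
A sketch of the decision (illustrative only; the actual code is in
yellowfin_rx() below):

    if (pkt_len > rx_copybreak)
        pass the full-sized skbuff up the stack and queue a fresh one;
    else
        copy the frame into a new pkt_len-sized skbuff and keep the buffer.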

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
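
In other words, the intended invariant is 0 <= cur_tx - dirty_tx <=
TX_RING_SIZE: entries in [dirty_tx, cur_tx) are owned by the hardware and
the rest are free.  The interrupt handler reports a larger difference as an
out-of-sync dirty pointer.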

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
   Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/



enum capability_flags {
    HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
    HasMACAddrBug=32, /* Only on early revs.  */
    DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
    YELLOWFIN_SIZE  = 0x100,
};

struct pci_id_info {
        const char *name;
        struct match_info {
                int     pci, pci_mask, subsystem, subsystem_mask;
                int revision, revision_mask;                            /* Only 8 bits. */
        } id;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
    {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
     FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
    {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
      HasMII | DontUseEeprom },
    { }
};

static const struct pci_device_id yellowfin_pci_tbl[] = {
    { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
    { }
};
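
/* The trailing 0/1 in each entry above is driver_data; yellowfin_init_one()
   uses it as an index into pci_id_tbl to pick the board name and
   capability flags. */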
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
enum yellowfin_offsets {
    TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
    TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
    RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
    RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
    EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
    ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
    Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
    MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
    MII_Status=0xAE,
    RxDepth=0xB8, FlowCtrl=0xBC,
    AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
    EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
    EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
    __le32 dbdma_cmd;
    __le32 addr;
    __le32 branch_addr;
    __le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
    u16 tx_errs;
    u16 tx_cnt;
    u16 paused;
    u16 total_tx_cnt;
#else  /* Little endian chips. */
    u16 tx_cnt;
    u16 tx_errs;
    u16 total_tx_cnt;
    u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
    CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
    CMD_NOP=0x60000000, CMD_STOP=0x70000000,
    BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
    BRANCH_IFTRUE=0x040000,
};
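
/* A dbdma_cmd word combines one CMD_* opcode with modifier bits and a
   16-bit byte count in the low word.  For example, the Rx ring is filled
   with cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz) and the
   transmit path uses cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len); see
   yellowfin_init_ring() and yellowfin_start_xmit() below. */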

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
    IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
    IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN  31  /* Required alignment mask */
#define MII_CNT     4
struct yellowfin_private {
    /* Descriptor rings first for alignment.
       Tx requires a second descriptor for status. */
    struct yellowfin_desc *rx_ring;
    struct yellowfin_desc *tx_ring;
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    dma_addr_t rx_ring_dma;
    dma_addr_t tx_ring_dma;

    struct tx_status_words *tx_status;
    dma_addr_t tx_status_dma;

    struct timer_list timer;    /* Media selection timer. */
    /* Frequently used and paired value: keep adjacent for cache effect. */
    int chip_id, drv_flags;
    struct pci_dev *pci_dev;
    unsigned int cur_rx, dirty_rx;      /* Producer/consumer ring indices */
    unsigned int rx_buf_sz;             /* Based on MTU+slack. */
    struct tx_status_words *tx_tail_desc;
    unsigned int cur_tx, dirty_tx;
    int tx_threshold;
    unsigned int tx_full:1;             /* The Tx queue is full. */
    unsigned int full_duplex:1;         /* Full-duplex operation requested. */
    unsigned int duplex_lock:1;
    unsigned int medialock:1;           /* Do not sense media. */
    unsigned int default_port:4;        /* Last dev->if_port value. */
    /* MII transceiver section. */
    int mii_cnt;                        /* MII device addresses. */
    u16 advertising;                    /* NWay media advertisement */
    unsigned char phys[MII_CNT];        /* MII device addresses, only first one used */
    spinlock_t lock;
    void __iomem *base;
};

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                    struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
    .ndo_open       = yellowfin_open,
    .ndo_stop       = yellowfin_close,
    .ndo_start_xmit     = yellowfin_start_xmit,
    .ndo_set_rx_mode    = set_rx_mode,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_eth_ioctl      = netdev_ioctl,
    .ndo_tx_timeout     = yellowfin_tx_timeout,
};

static int yellowfin_init_one(struct pci_dev *pdev,
                  const struct pci_device_id *ent)
{
    struct net_device *dev;
    struct yellowfin_private *np;
    int irq;
    int chip_idx = ent->driver_data;
    static int find_cnt;
    void __iomem *ioaddr;
    int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
    int drv_flags = pci_id_tbl[chip_idx].drv_flags;
    void *ring_space;
    dma_addr_t ring_dma;
#ifdef USE_IO_OPS
    int bar = 0;
#else
    int bar = 1;
#endif
    u8 addr[ETH_ALEN];

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
    static int printed_version;
    if (!printed_version++)
        printk(version);
#endif

    i = pci_enable_device(pdev);
    if (i) return i;

    dev = alloc_etherdev(sizeof(*np));
    if (!dev)
        return -ENOMEM;

    SET_NETDEV_DEV(dev, &pdev->dev);

    np = netdev_priv(dev);

    if (pci_request_regions(pdev, DRV_NAME))
        goto err_out_free_netdev;

    pci_set_master (pdev);

    ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
    if (!ioaddr)
        goto err_out_free_res;

    irq = pdev->irq;

    if (drv_flags & DontUseEeprom)
        for (i = 0; i < 6; i++)
            addr[i] = ioread8(ioaddr + StnAddr + i);
    else {
        int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
        for (i = 0; i < 6; i++)
            addr[i] = read_eeprom(ioaddr, ee_offset + i);
    }
    eth_hw_addr_set(dev, addr);

    /* Reset the chip. */
    iowrite32(0x80000000, ioaddr + DMACtrl);

    pci_set_drvdata(pdev, dev);
    spin_lock_init(&np->lock);

    np->pci_dev = pdev;
    np->chip_id = chip_idx;
    np->drv_flags = drv_flags;
    np->base = ioaddr;

    ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_cleardev;
    np->tx_ring = ring_space;
    np->tx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                    GFP_KERNEL);
    if (!ring_space)
        goto err_out_unmap_tx;
    np->rx_ring = ring_space;
    np->rx_ring_dma = ring_dma;

    ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
                    &ring_dma, GFP_KERNEL);
    if (!ring_space)
        goto err_out_unmap_rx;
    np->tx_status = ring_space;
    np->tx_status_dma = ring_dma;

    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->full_duplex = 1;
        np->default_port = option & 15;
        if (np->default_port)
            np->medialock = 1;
    }
    if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
        np->full_duplex = 1;

    if (np->full_duplex)
        np->duplex_lock = 1;

    /* The Yellowfin-specific entries in the device structure. */
    dev->netdev_ops = &netdev_ops;
    dev->ethtool_ops = &ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    if (mtu)
        dev->mtu = mtu;

    i = register_netdev(dev);
    if (i)
        goto err_out_unmap_status;

    netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
            pci_id_tbl[chip_idx].name,
            ioread32(ioaddr + ChipRev), ioaddr,
            dev->dev_addr, irq);

    if (np->drv_flags & HasMII) {
        int phy, phy_idx = 0;
        for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
            int mii_status = mdio_read(ioaddr, phy, 1);
            if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                np->advertising = mdio_read(ioaddr, phy, 4);
                netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
                        phy, mii_status, np->advertising);
            }
        }
        np->mii_cnt = phy_idx;
    }

    find_cnt++;

    return 0;

err_out_unmap_status:
    dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
              np->tx_status_dma);
err_out_unmap_rx:
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
              np->rx_ring_dma);
err_out_unmap_tx:
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
              np->tx_ring_dma);
err_out_cleardev:
    pci_iounmap(pdev, ioaddr);
err_out_free_res:
    pci_release_regions(pdev);
err_out_free_netdev:
    free_netdev (dev);
    return -ENODEV;
}

static int read_eeprom(void __iomem *ioaddr, int location)
{
    int bogus_cnt = 10000;      /* Typical 33 MHz: 1050 ticks */

    iowrite8(location, ioaddr + EEAddr);
    iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
    while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
        ;
    return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
    int i;

    iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
    iowrite16(1, ioaddr + MII_Cmd);
    for (i = 10000; i >= 0; i--)
        if ((ioread16(ioaddr + MII_Status) & 1) == 0)
            break;
    return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
    int i;

    iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
    iowrite16(value, ioaddr + MII_Wr_Data);

    /* Wait for the command to finish. */
    for (i = 10000; i >= 0; i--)
        if ((ioread16(ioaddr + MII_Status) & 1) == 0)
            break;
}
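
/* Example: reading the basic mode status register (MII register 1, i.e.
   MII_BMSR from <linux/mii.h>) of the PHY at address 0 is
   mdio_read(ioaddr, 0, MII_BMSR); yellowfin_init_one() above probes PHY
   addresses 0-31 this way, keeping up to MII_CNT transceivers. */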


static int yellowfin_open(struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    const int irq = yp->pci_dev->irq;
    void __iomem *ioaddr = yp->base;
    int i, rc;

    /* Reset the chip. */
    iowrite32(0x80000000, ioaddr + DMACtrl);

    rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
    if (rc)
        return rc;

    rc = yellowfin_init_ring(dev);
    if (rc < 0)
        goto err_free_irq;

    iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
    iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

    for (i = 0; i < 6; i++)
        iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

    /* Set up various condition 'select' registers.
       There are no options here. */
    iowrite32(0x00800080, ioaddr + TxIntrSel);  /* Interrupt on Tx abort */
    iowrite32(0x00800080, ioaddr + TxBranchSel);    /* Branch on Tx abort */
    iowrite32(0x00400040, ioaddr + TxWaitSel);  /* Wait on Tx status */
    iowrite32(0x00400040, ioaddr + RxIntrSel);  /* Interrupt on Rx done */
    iowrite32(0x00400040, ioaddr + RxBranchSel);    /* Branch on Rx error */
    iowrite32(0x00400040, ioaddr + RxWaitSel);  /* Wait on Rx done */

    /* Initialize other registers: with so many, this will eventually be
       converted to an offset/value list. */
    iowrite32(dma_ctrl, ioaddr + DMACtrl);
    iowrite16(fifo_cfg, ioaddr + FIFOcfg);
    /* Enable automatic generation of flow control frames, period 0xffff. */
    iowrite32(0x0030FFFF, ioaddr + FlowCtrl);

    yp->tx_threshold = 32;
    iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

    if (dev->if_port == 0)
        dev->if_port = yp->default_port;

    netif_start_queue(dev);

    /* Setting the Rx mode will start the Rx process. */
    if (yp->drv_flags & IsGigabit) {
        /* We are always in full-duplex mode with gigabit! */
        yp->full_duplex = 1;
        iowrite16(0x01CF, ioaddr + Cnfg);
    } else {
        iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
        iowrite16(0x1018, ioaddr + FrameGap1);
        iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
    }
    set_rx_mode(dev);

    /* Enable interrupts by setting the interrupt mask. */
    iowrite16(0x81ff, ioaddr + IntrEnb);            /* See enum intr_status_bits */
    iowrite16(0x0000, ioaddr + EventStatus);        /* Clear non-interrupting events */
    iowrite32(0x80008000, ioaddr + RxCtrl);     /* Start Rx and Tx channels. */
    iowrite32(0x80008000, ioaddr + TxCtrl);

    if (yellowfin_debug > 2) {
        netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
    }

    /* Set the timer to check for link beat. */
    timer_setup(&yp->timer, yellowfin_timer, 0);
    yp->timer.expires = jiffies + 3*HZ;
    add_timer(&yp->timer);
out:
    return rc;

err_free_irq:
    free_irq(irq, dev);
    goto out;
}

static void yellowfin_timer(struct timer_list *t)
{
    struct yellowfin_private *yp = from_timer(yp, t, timer);
    struct net_device *dev = pci_get_drvdata(yp->pci_dev);
    void __iomem *ioaddr = yp->base;
    int next_tick = 60*HZ;

    if (yellowfin_debug > 3) {
        netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
                  ioread16(ioaddr + IntrStatus));
    }

    if (yp->mii_cnt) {
        int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
        int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
        int negotiated = lpa & yp->advertising;
        if (yellowfin_debug > 1)
            netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
                      yp->phys[0], bmsr, lpa);

        yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);

        iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

        if (bmsr & BMSR_LSTATUS)
            next_tick = 60*HZ;
        else
            next_tick = 3*HZ;
    }

    yp->timer.expires = jiffies + next_tick;
    add_timer(&yp->timer);
}

static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    void __iomem *ioaddr = yp->base;

    netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
            yp->cur_tx, yp->dirty_tx,
            ioread32(ioaddr + TxStatus),
            ioread32(ioaddr + RxStatus));

    /* Note: these should be KERN_DEBUG. */
    if (yellowfin_debug) {
        int i;
        pr_warn("  Rx ring %p: ", yp->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            pr_cont(" %08x", yp->rx_ring[i].result_status);
        pr_cont("\n");
        pr_warn("  Tx ring %p: ", yp->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            pr_cont(" %04x /%08x",
                   yp->tx_status[i].tx_errs,
                   yp->tx_ring[i].result_status);
        pr_cont("\n");
    }

    /* If the hardware is found to hang regularly, we will update the code
       to reinitialize the chip here. */
    dev->if_port = 0;

    /* Wake the potentially-idle transmit channel. */
    iowrite32(0x10001000, yp->base + TxCtrl);
    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
        netif_wake_queue (dev);     /* Typical path */

    netif_trans_update(dev); /* prevent tx timeout */
    dev->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    int i, j;

    yp->tx_full = 0;
    yp->cur_rx = yp->cur_tx = 0;
    yp->dirty_tx = 0;

    yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    for (i = 0; i < RX_RING_SIZE; i++) {
        yp->rx_ring[i].dbdma_cmd =
            cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
        yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
            ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
    }

    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
        yp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb_reserve(skb, 2);    /* 16 byte align the IP header. */
        yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                 skb->data,
                                 yp->rx_buf_sz,
                                 DMA_FROM_DEVICE));
    }
    if (i != RX_RING_SIZE) {
        for (j = 0; j < i; j++)
            dev_kfree_skb(yp->rx_skbuff[j]);
        return -ENOMEM;
    }
    yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

#define NO_TXSTATS
#ifdef NO_TXSTATS
    /* In this mode the Tx ring needs only a single descriptor. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        yp->tx_skbuff[i] = NULL;
        yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
            ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
    }
    /* Wrap ring */
    yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
    /* Tx ring needs a pair of descriptors, the second for the status. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        j = 2*i;
        yp->tx_skbuff[i] = 0;
        /* Branch on Tx error. */
        yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
            (j+1)*sizeof(struct yellowfin_desc));
        j++;
        if (yp->flags & FullTxStatus) {
            yp->tx_ring[j].dbdma_cmd =
                cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
            yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
            yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                i*sizeof(struct tx_status_words));
        } else {
            /* Symbios chips write only tx_errs word. */
            yp->tx_ring[j].dbdma_cmd =
                cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
            yp->tx_ring[j].request_cnt = 2;
            /* Om pade ummmmm... */
            yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
                i*sizeof(struct tx_status_words) +
                &(yp->tx_status[0].tx_errs) -
                &(yp->tx_status[0]));
        }
        yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
            ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
    }
    /* Wrap ring */
    yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
    yp->tx_tail_desc = &yp->tx_status[0];
    return 0;
}

static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
                    struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    unsigned entry;
    int len = skb->len;

    netif_stop_queue (dev);

    /* Note: Ordering is important here, set the field with the
       "ownership" bit last, and only then increment cur_tx. */

    /* Calculate the next Tx descriptor entry. */
    entry = yp->cur_tx % TX_RING_SIZE;

    if (gx_fix) {   /* Note: only works for paddable protocols e.g.  IP. */
        int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
        /* Fix GX chipset errata. */
        if (cacheline_end > 24  || cacheline_end == 0) {
            len = skb->len + 32 - cacheline_end + 1;
            if (skb_padto(skb, len)) {
                yp->tx_skbuff[entry] = NULL;
                netif_wake_queue(dev);
                return NETDEV_TX_OK;
            }
        }
    }
    yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
    yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                 skb->data,
                                 len, DMA_TO_DEVICE));
    yp->tx_ring[entry].result_status = 0;
    if (entry >= TX_RING_SIZE-1) {
        /* New stop command. */
        yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
    } else {
        yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->tx_ring[entry].dbdma_cmd =
            cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
    }
    yp->cur_tx++;
#else
    yp->tx_ring[entry<<1].request_cnt = len;
    yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                skb->data,
                                len, DMA_TO_DEVICE));
    /* The input_last (status-write) command is constant, but we must
       rewrite the subsequent 'stop' command. */

    yp->cur_tx++;
    {
        unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
        yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
    }
    /* Final step -- overwrite the old 'stop' command. */

    yp->tx_ring[entry<<1].dbdma_cmd =
        cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
                      CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif

    /* Non-x86 Todo: explicitly flush cache lines here. */

    /* Wake the potentially-idle transmit channel. */
    iowrite32(0x10001000, yp->base + TxCtrl);

    if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
        netif_start_queue (dev);        /* Typical path */
    else
        yp->tx_full = 1;

    if (yellowfin_debug > 4) {
        netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
                  yp->cur_tx, entry);
    }
    return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct yellowfin_private *yp;
    void __iomem *ioaddr;
    int boguscnt = max_interrupt_work;
    unsigned int handled = 0;

    yp = netdev_priv(dev);
    ioaddr = yp->base;

    spin_lock (&yp->lock);

    do {
        u16 intr_status = ioread16(ioaddr + IntrClear);

        if (yellowfin_debug > 4)
            netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
                      intr_status);

        if (intr_status == 0)
            break;
        handled = 1;

        if (intr_status & (IntrRxDone | IntrEarlyRx)) {
            yellowfin_rx(dev);
            iowrite32(0x10001000, ioaddr + RxCtrl);     /* Wake Rx engine. */
        }

#ifdef NO_TXSTATS
        for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
            int entry = yp->dirty_tx % TX_RING_SIZE;
            struct sk_buff *skb;

            if (yp->tx_ring[entry].result_status == 0)
                break;
            skb = yp->tx_skbuff[entry];
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;
            /* Free the original skb. */
            dma_unmap_single(&yp->pci_dev->dev,
                     le32_to_cpu(yp->tx_ring[entry].addr),
                     skb->len, DMA_TO_DEVICE);
            dev_consume_skb_irq(skb);
            yp->tx_skbuff[entry] = NULL;
        }
        if (yp->tx_full &&
            yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
            /* The ring is no longer full, clear tbusy. */
            yp->tx_full = 0;
            netif_wake_queue(dev);
        }
#else
        if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
            unsigned dirty_tx = yp->dirty_tx;

            for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                /* Todo: optimize this. */
                int entry = dirty_tx % TX_RING_SIZE;
                u16 tx_errs = yp->tx_status[entry].tx_errs;
                struct sk_buff *skb;

#ifndef final_version
                if (yellowfin_debug > 5)
                    netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
                              entry,
                              yp->tx_status[entry].tx_cnt,
                              yp->tx_status[entry].tx_errs,
                              yp->tx_status[entry].total_tx_cnt,
                              yp->tx_status[entry].paused);
#endif
                if (tx_errs == 0)
                    break;  /* It still hasn't been Txed */
                skb = yp->tx_skbuff[entry];
                if (tx_errs & 0xF810) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (yellowfin_debug > 1)
                        netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
                                  tx_errs);
#endif
                    dev->stats.tx_errors++;
                    if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
                    if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
                    if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
                    if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
                } else {
#ifndef final_version
                    if (yellowfin_debug > 4)
                        netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
                                  tx_errs);
#endif
                    dev->stats.tx_bytes += skb->len;
                    dev->stats.collisions += tx_errs & 15;
                    dev->stats.tx_packets++;
                }
                /* Free the original skb. */
                dma_unmap_single(&yp->pci_dev->dev,
                         yp->tx_ring[entry << 1].addr,
                         skb->len, DMA_TO_DEVICE);
                dev_consume_skb_irq(skb);
                yp->tx_skbuff[entry] = 0;
                /* Mark status as empty. */
                yp->tx_status[entry].tx_errs = 0;
            }

#ifndef final_version
            if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
                netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
                       dirty_tx, yp->cur_tx, yp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (yp->tx_full &&
                yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
                /* The ring is no longer full, clear tbusy. */
                yp->tx_full = 0;
                netif_wake_queue(dev);
            }

            yp->dirty_tx = dirty_tx;
            yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
        }
#endif

        /* Log errors and other uncommon events. */
        if (intr_status & 0x2ee)    /* Abnormal error summary. */
            yellowfin_error(dev, intr_status);

        if (--boguscnt < 0) {
            netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
                    intr_status);
            break;
        }
    } while (1);

    if (yellowfin_debug > 3)
        netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
                  ioread16(ioaddr + IntrStatus));

    spin_unlock (&yp->lock);
    return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    int entry = yp->cur_rx % RX_RING_SIZE;
    int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

    if (yellowfin_debug > 4) {
        printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
               entry, yp->rx_ring[entry].result_status);
        printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
               entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
               yp->rx_ring[entry].result_status);
    }

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (1) {
        struct yellowfin_desc *desc = &yp->rx_ring[entry];
        struct sk_buff *rx_skb = yp->rx_skbuff[entry];
        s16 frame_status;
        u16 desc_status;
        int data_size, __maybe_unused yf_size;
        u8 *buf_addr;

        if(!desc->result_status)
            break;
        dma_sync_single_for_cpu(&yp->pci_dev->dev,
                    le32_to_cpu(desc->addr),
                    yp->rx_buf_sz, DMA_FROM_DEVICE);
        desc_status = le32_to_cpu(desc->result_status) >> 16;
        buf_addr = rx_skb->data;
        data_size = (le32_to_cpu(desc->dbdma_cmd) -
            le32_to_cpu(desc->result_status)) & 0xffff;
        frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
        if (yellowfin_debug > 4)
            printk(KERN_DEBUG "  %s() status was %04x\n",
                   __func__, frame_status);
        if (--boguscnt < 0)
            break;

        yf_size = sizeof(struct yellowfin_desc);

        if ( ! (desc_status & RX_EOP)) {
            if (data_size != 0)
                netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
                        desc_status, data_size);
            dev->stats.rx_length_errors++;
        } else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
            /* There was an error. */
            if (yellowfin_debug > 3)
                printk(KERN_DEBUG "  %s() Rx error was %04x\n",
                       __func__, frame_status);
            dev->stats.rx_errors++;
            if (frame_status & 0x0060) dev->stats.rx_length_errors++;
            if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
            if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
            if (frame_status < 0) dev->stats.rx_dropped++;
        } else if ( !(yp->drv_flags & IsGigabit)  &&
                   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
            u8 status1 = buf_addr[data_size-2];
            u8 status2 = buf_addr[data_size-1];
            dev->stats.rx_errors++;
            if (status1 & 0xC0) dev->stats.rx_length_errors++;
            if (status2 & 0x03) dev->stats.rx_frame_errors++;
            if (status2 & 0x04) dev->stats.rx_crc_errors++;
            if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE     /* Support for prototype hardware errata. */
        } else if ((yp->flags & HasMACAddrBug)  &&
            !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
                              entry * yf_size),
                      dev->dev_addr) &&
            !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
                              entry * yf_size),
                      "\377\377\377\377\377\377")) {
            if (bogus_rx++ == 0)
                netdev_warn(dev, "Bad frame to %pM\n",
                        buf_addr);
#endif
        } else {
            struct sk_buff *skb;
            int pkt_len = data_size -
                (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
            /* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
            if (yellowfin_debug > 4)
                printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
                       __func__, pkt_len, data_size, boguscnt);
#endif
            /* Check if the packet is long enough to just pass up the skbuff
               without copying to a properly sized skbuff. */
            if (pkt_len > rx_copybreak) {
                skb_put(skb = rx_skb, pkt_len);
                dma_unmap_single(&yp->pci_dev->dev,
                         le32_to_cpu(yp->rx_ring[entry].addr),
                         yp->rx_buf_sz,
                         DMA_FROM_DEVICE);
                yp->rx_skbuff[entry] = NULL;
            } else {
                skb = netdev_alloc_skb(dev, pkt_len + 2);
                if (skb == NULL)
                    break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                skb_put(skb, pkt_len);
                dma_sync_single_for_device(&yp->pci_dev->dev,
                               le32_to_cpu(desc->addr),
                               yp->rx_buf_sz,
                               DMA_FROM_DEVICE);
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pkt_len;
        }
        entry = (++yp->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
        entry = yp->dirty_rx % RX_RING_SIZE;
        if (yp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
            if (skb == NULL)
                break;              /* Better luck next round. */
            yp->rx_skbuff[entry] = skb;
            skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
            yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
                                         skb->data,
                                         yp->rx_buf_sz,
                                         DMA_FROM_DEVICE));
        }
        yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
        if (entry != 0)
            yp->rx_ring[entry - 1].dbdma_cmd =
                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
        else
            yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
                cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
                            | yp->rx_buf_sz);
    }

    return 0;
}

static void yellowfin_error(struct net_device *dev, int intr_status)
{
    netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
    /* Hmmmmm, it's not clear what to do here. */
    if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
        dev->stats.tx_errors++;
    if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
        dev->stats.rx_errors++;
}

static int yellowfin_close(struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    void __iomem *ioaddr = yp->base;
    int i;

    netif_stop_queue (dev);

    if (yellowfin_debug > 1) {
        netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
                  ioread16(ioaddr + TxStatus),
                  ioread16(ioaddr + RxStatus),
                  ioread16(ioaddr + IntrStatus));
        netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
                  yp->cur_tx, yp->dirty_tx,
                  yp->cur_rx, yp->dirty_rx);
    }

    /* Disable interrupts by clearing the interrupt mask. */
    iowrite16(0x0000, ioaddr + IntrEnb);

    /* Stop the chip's Tx and Rx processes. */
    iowrite32(0x80000000, ioaddr + RxCtrl);
    iowrite32(0x80000000, ioaddr + TxCtrl);

    del_timer(&yp->timer);

#if defined(__i386__)
    if (yellowfin_debug > 2) {
        printk(KERN_DEBUG "  Tx ring at %08llx:\n",
                (unsigned long long)yp->tx_ring_dma);
        for (i = 0; i < TX_RING_SIZE*2; i++)
            printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
                   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
                   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
                   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
        printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
                   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
                   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

        printk(KERN_DEBUG "  Rx ring %08llx:\n",
                (unsigned long long)yp->rx_ring_dma);
        for (i = 0; i < RX_RING_SIZE; i++) {
            printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
                   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
                   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
                   yp->rx_ring[i].result_status);
            if (yellowfin_debug > 6) {
                if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
                    int j;

                    printk(KERN_DEBUG);
                    for (j = 0; j < 0x50; j++)
                        pr_cont(" %04x",
                            get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
                    pr_cont("\n");
                }
            }
        }
    }
#endif /* __i386__ debugging only */

    free_irq(yp->pci_dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
        if (yp->rx_skbuff[i]) {
            dev_kfree_skb(yp->rx_skbuff[i]);
        }
        yp->rx_skbuff[i] = NULL;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        dev_kfree_skb(yp->tx_skbuff[i]);
        yp->tx_skbuff[i] = NULL;
    }

#ifdef YF_PROTOTYPE         /* Support for prototype hardware errata. */
    if (yellowfin_debug > 0) {
        netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
                  bogus_rx);
    }
#endif

    return 0;
}

/* Set or clear the multicast filter for this adaptor. */

static void set_rx_mode(struct net_device *dev)
{
    struct yellowfin_private *yp = netdev_priv(dev);
    void __iomem *ioaddr = yp->base;
    u16 cfg_value = ioread16(ioaddr + Cnfg);

    /* Stop the Rx process to change any value. */
    iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
    if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
        iowrite16(0x000F, ioaddr + AddrMode);
    } else if ((netdev_mc_count(dev) > 64) ||
           (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter well, or accept all multicasts. */
        iowrite16(0x000B, ioaddr + AddrMode);
    } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
        struct netdev_hw_addr *ha;
        u16 hash_table[4];
        int i;

        memset(hash_table, 0, sizeof(hash_table));
        netdev_for_each_mc_addr(ha, dev) {
            unsigned int bit;

            /* Due to a bug in the early chip versions, multiple filter
               slots must be set for each address. */
            if (yp->drv_flags & HasMulticastBug) {
                bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
                hash_table[bit >> 4] |= (1 << bit);
                bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
                hash_table[bit >> 4] |= (1 << bit);
                bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
                hash_table[bit >> 4] |= (1 << bit);
            }
            bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
            hash_table[bit >> 4] |= (1 << bit);
        }
        /* Copy the hash table to the chip. */
        for (i = 0; i < 4; i++)
            iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
        iowrite16(0x0003, ioaddr + AddrMode);
    } else {                    /* Normal, unicast/broadcast-only mode. */
        iowrite16(0x0001, ioaddr + AddrMode);
    }
    /* Restart the Rx process. */
    iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
}

static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct yellowfin_private *np = netdev_priv(dev);

    strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
    strlcpy(info->version, DRV_VERSION, sizeof(info->version));
    strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static const struct ethtool_ops ethtool_ops = {
    .get_drvinfo = yellowfin_get_drvinfo
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct yellowfin_private *np = netdev_priv(dev);
    void __iomem *ioaddr = np->base;
    struct mii_ioctl_data *data = if_mii(rq);

    switch(cmd) {
    case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
        data->phy_id = np->phys[0] & 0x1f;
        fallthrough;

    case SIOCGMIIREG:       /* Read MII PHY register. */
        data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
        return 0;

    case SIOCSMIIREG:       /* Write MII PHY register. */
        if (data->phy_id == np->phys[0]) {
            u16 value = data->val_in;
            switch (data->reg_num) {
            case 0:
                /* Check for autonegotiation on or reset. */
                np->medialock = (value & 0x9000) ? 0 : 1;
                if (np->medialock)
                    np->full_duplex = (value & 0x0100) ? 1 : 0;
                break;
            case 4: np->advertising = value; break;
            }
            /* Perhaps check_duplex(dev), depending on chip semantics. */
        }
        mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}


static void yellowfin_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct yellowfin_private *np;

    BUG_ON(!dev);
    np = netdev_priv(dev);

    dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
              np->tx_status_dma);
    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
              np->rx_ring_dma);
    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
              np->tx_ring_dma);
    unregister_netdev (dev);

    pci_iounmap(pdev, np->base);

    pci_release_regions (pdev);

    free_netdev (dev);
}


static struct pci_driver yellowfin_driver = {
    .name       = DRV_NAME,
    .id_table   = yellowfin_pci_tbl,
    .probe      = yellowfin_init_one,
    .remove     = yellowfin_remove_one,
};


static int __init yellowfin_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
    printk(version);
#endif
    return pci_register_driver(&yellowfin_driver);
}


static void __exit yellowfin_cleanup (void)
{
    pci_unregister_driver (&yellowfin_driver);
}


module_init(yellowfin_init);
module_exit(yellowfin_cleanup);