Back to home page

OSCL-LXR

 
 

    


0001 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
0002 /*
0003     Written 1998-2000 by Donald Becker.
0004 
0005     Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
0006     send all bug reports to me, and not to Donald Becker, as this code
0007     has been heavily modified from Donald's original version.
0008 
0009     This software may be used and distributed according to the terms of
0010     the GNU General Public License (GPL), incorporated herein by reference.
0011     Drivers based on or derived from this code fall under the GPL and must
0012     retain the authorship, copyright and license notice.  This file is not
0013     a complete program and may only be used when the entire operating
0014     system is licensed under the GPL.
0015 
0016     The information below comes from Donald Becker's original driver:
0017 
0018     The author may be reached as becker@scyld.com, or C/O
0019     Scyld Computing Corporation
0020     410 Severn Ave., Suite 210
0021     Annapolis MD 21403
0022 
0023     Support and updates available at
0024     http://www.scyld.com/network/starfire.html
0025     [link no longer provides useful info -jgarzik]
0026 
0027 */
0028 
0029 #define DRV_NAME    "starfire"
0030 
0031 #include <linux/interrupt.h>
0032 #include <linux/module.h>
0033 #include <linux/kernel.h>
0034 #include <linux/pci.h>
0035 #include <linux/netdevice.h>
0036 #include <linux/etherdevice.h>
0037 #include <linux/init.h>
0038 #include <linux/delay.h>
0039 #include <linux/crc32.h>
0040 #include <linux/ethtool.h>
0041 #include <linux/mii.h>
0042 #include <linux/if_vlan.h>
0043 #include <linux/mm.h>
0044 #include <linux/firmware.h>
0045 #include <asm/processor.h>      /* Processor type for cache alignment. */
0046 #include <linux/uaccess.h>
0047 #include <asm/io.h>
0048 
0049 /*
0050  * The current frame processor firmware fails to checksum a fragment
0051  * of length 1. If and when this is fixed, the #define below can be removed.
0052  */
0053 #define HAS_BROKEN_FIRMWARE
0054 
0055 /*
0056  * If using the broken firmware, data must be padded to the next 32-bit boundary.
0057  */
0058 #ifdef HAS_BROKEN_FIRMWARE
0059 #define PADDING_MASK 3
0060 #endif
0061 
0062 /*
0063  * Define this if using the driver with the zero-copy patch
0064  */
0065 #define ZEROCOPY
0066 
0067 #if IS_ENABLED(CONFIG_VLAN_8021Q)
0068 #define VLAN_SUPPORT
0069 #endif
0070 
0071 /* The user-configurable values.
0072    These may be modified when a driver module is loaded.*/
0073 
0074 /* Used for tuning interrupt latency vs. overhead. */
0075 static int intr_latency;
0076 static int small_frames;
0077 
0078 static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
0079 static int max_interrupt_work = 20;
0080 static int mtu;
0081 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
0082    The Starfire has a 512 element hash table based on the Ethernet CRC. */
0083 static const int multicast_filter_limit = 512;
0084 /* Whether to do TCP/UDP checksums in hardware */
0085 static int enable_hw_cksum = 1;
0086 
0087 #define PKT_BUF_SZ  1536        /* Size of each temporary Rx buffer.*/
0088 /*
0089  * Set the copy breakpoint for the copy-only-tiny-frames scheme.
0090  * Setting to > 1518 effectively disables this feature.
0091  *
0092  * NOTE:
0093  * The ia64 doesn't allow for unaligned loads even of integers being
0094  * misaligned on a 2 byte boundary. Thus always force copying of
0095  * packets as the starfire doesn't allow for misaligned DMAs ;-(
0096  * 23/10/2000 - Jes
0097  *
0098  * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
0099  * at least, having unaligned frames leads to a rather serious performance
0100  * penalty. -Ion
0101  */
0102 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
0103 static int rx_copybreak = PKT_BUF_SZ;
0104 #else
0105 static int rx_copybreak /* = 0 */;
0106 #endif
0107 
0108 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
0109 #ifdef __sparc__
0110 #define DMA_BURST_SIZE 64
0111 #else
0112 #define DMA_BURST_SIZE 128
0113 #endif
0114 
0115 /* Operational parameters that are set at compile time. */
0116 
0117 /* The "native" ring sizes are either 256 or 2048.
0118    However in some modes a descriptor may be marked to wrap the ring earlier.
0119 */
0120 #define RX_RING_SIZE    256
0121 #define TX_RING_SIZE    32
0122 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
0123 #define DONE_Q_SIZE 1024
0124 /* All queues must be aligned on a 256-byte boundary */
0125 #define QUEUE_ALIGN 256
0126 
0127 #if RX_RING_SIZE > 256
0128 #define RX_Q_ENTRIES Rx2048QEntries
0129 #else
0130 #define RX_Q_ENTRIES Rx256QEntries
0131 #endif
0132 
0133 /* Operational parameters that usually are not changed. */
0134 /* Time in jiffies before concluding the transmitter is hung. */
0135 #define TX_TIMEOUT  (2 * HZ)
0136 
0137 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
0138 /* 64-bit dma_addr_t */
0139 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
0140 #define netdrv_addr_t __le64
0141 #define cpu_to_dma(x) cpu_to_le64(x)
0142 #define dma_to_cpu(x) le64_to_cpu(x)
0143 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
0144 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
0145 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
0146 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
0147 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
0148 #else  /* 32-bit dma_addr_t */
0149 #define netdrv_addr_t __le32
0150 #define cpu_to_dma(x) cpu_to_le32(x)
0151 #define dma_to_cpu(x) le32_to_cpu(x)
0152 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
0153 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
0154 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
0155 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
0156 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
0157 #endif
0158 
0159 #define skb_first_frag_len(skb) skb_headlen(skb)
0160 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
0161 
0162 /* Firmware names */
0163 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
0164 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
0165 
0166 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
0167 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
0168 MODULE_LICENSE("GPL");
0169 MODULE_FIRMWARE(FIRMWARE_RX);
0170 MODULE_FIRMWARE(FIRMWARE_TX);
0171 
0172 module_param(max_interrupt_work, int, 0);
0173 module_param(mtu, int, 0);
0174 module_param(debug, int, 0);
0175 module_param(rx_copybreak, int, 0);
0176 module_param(intr_latency, int, 0);
0177 module_param(small_frames, int, 0);
0178 module_param(enable_hw_cksum, int, 0);
0179 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
0180 MODULE_PARM_DESC(mtu, "MTU (all boards)");
0181 MODULE_PARM_DESC(debug, "Debug level (0-6)");
0182 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
0183 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
0184 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
0185 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
0186 
0187 /*
0188                 Theory of Operation
0189 
0190 I. Board Compatibility
0191 
0192 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
0193 
0194 II. Board-specific settings
0195 
0196 III. Driver operation
0197 
0198 IIIa. Ring buffers
0199 
0200 The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
0201 ring sizes are set fixed by the hardware, but may optionally be wrapped
0202 earlier by the END bit in the descriptor.
0203 This driver uses that hardware queue size for the Rx ring, where a large
0204 number of entries has no ill effect beyond increases the potential backlog.
0205 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
0206 disables the queue layer priority ordering and we have no mechanism to
0207 utilize the hardware two-level priority queue.  When modifying the
0208 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
0209 levels.
0210 
0211 IIIb/c. Transmit/Receive Structure
0212 
0213 See the Adaptec manual for the many possible structures, and options for
0214 each structure.  There are far too many to document all of them here.
0215 
0216 For transmit this driver uses type 0/1 transmit descriptors (depending
0217 on the 32/64 bitness of the architecture), and relies on automatic
0218 minimum-length padding.  It does not use the completion queue
0219 consumer index, but instead checks for non-zero status entries.
0220 
0221 For receive this driver uses type 2/3 receive descriptors.  The driver
0222 allocates full frame size skbuffs for the Rx ring buffers, so all frames
0223 should fit in a single descriptor.  The driver does not use the completion
0224 queue consumer index, but instead checks for non-zero status entries.
0225 
0226 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
0227 is allocated and the frame is copied to the new skbuff.  When the incoming
0228 frame is larger, the skbuff is passed directly up the protocol stack.
0229 Buffers consumed this way are replaced by newly allocated skbuffs in a later
0230 phase of receive.
0231 
0232 A notable aspect of operation is that unaligned buffers are not permitted by
0233 the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
0234 isn't longword aligned, which may cause problems on some machine
0235 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
0236 the frame into a new skbuff unconditionally. Copied frames are put into the
0237 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
0238 
0239 IIId. Synchronization
0240 
0241 The driver runs as two independent, single-threaded flows of control.  One
0242 is the send-packet routine, which enforces single-threaded use by the
0243 dev->tbusy flag.  The other thread is the interrupt handler, which is single
0244 threaded by the hardware and interrupt handling software.
0245 
0246 The send packet thread has partial control over the Tx ring and the netif_queue
0247 status. If the number of free Tx slots in the ring falls below a certain number
0248 (currently hardcoded to 4), it signals the upper layer to stop the queue.
0249 
0250 The interrupt handler has exclusive control over the Rx ring and records stats
0251 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
0252 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
0253 number of free Tx slow is above the threshold, it signals the upper layer to
0254 restart the queue.
0255 
0256 IV. Notes
0257 
0258 IVb. References
0259 
0260 The Adaptec Starfire manuals, available only from Adaptec.
0261 http://www.scyld.com/expert/100mbps.html
0262 http://www.scyld.com/expert/NWay.html
0263 
0264 IVc. Errata
0265 
0266 - StopOnPerr is broken, don't enable
0267 - Hardware ethernet padding exposes random data, perform software padding
0268   instead (unverified -- works correctly for all the hardware I have)
0269 
0270 */
0271 
0272 
0273 
0274 enum chip_capability_flags {CanHaveMII=1, };
0275 
0276 enum chipset {
0277     CH_6915 = 0,
0278 };
0279 
0280 static const struct pci_device_id starfire_pci_tbl[] = {
0281     { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
0282     { 0, }
0283 };
0284 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
0285 
0286 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
0287 static const struct chip_info {
0288     const char *name;
0289     int drv_flags;
0290 } netdrv_tbl[] = {
0291     { "Adaptec Starfire 6915", CanHaveMII },
0292 };
0293 
0294 
0295 /* Offsets to the device registers.
0296    Unlike software-only systems, device drivers interact with complex hardware.
0297    It's not useful to define symbolic names for every register bit in the
0298    device.  The name can only partially document the semantics and make
0299    the driver longer and more difficult to read.
0300    In general, only the important configuration values or bits changed
0301    multiple times should be defined symbolically.
0302 */
0303 enum register_offsets {
0304     PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
0305     IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
0306     MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
0307     GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
0308     TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
0309     TxRingHiAddr=0x5009C,       /* 64 bit address extension. */
0310     TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
0311     TxThreshold=0x500B0,
0312     CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
0313     RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
0314     CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
0315     RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
0316     RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
0317     TxMode=0x55000, VlanType=0x55064,
0318     PerfFilterTable=0x56000, HashTable=0x56100,
0319     TxGfpMem=0x58000, RxGfpMem=0x5a000,
0320 };
0321 
0322 /*
0323  * Bits in the interrupt status/mask registers.
0324  * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
0325  * enables all the interrupt sources that are or'ed into those status bits.
0326  */
0327 enum intr_status_bits {
0328     IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
0329     IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
0330     IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
0331     IntrTxComplQLow=0x200000, IntrPCI=0x100000,
0332     IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
0333     IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
0334     IntrNormalSummary=0x8000, IntrTxDone=0x4000,
0335     IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
0336     IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
0337     IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
0338     IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
0339     IntrNoTxCsum=0x20, IntrTxBadID=0x10,
0340     IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
0341     IntrTxGfp=0x02, IntrPCIPad=0x01,
0342     /* not quite bits */
0343     IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
0344     IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
0345     IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
0346 };
0347 
0348 /* Bits in the RxFilterMode register. */
0349 enum rx_mode_bits {
0350     AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
0351     AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
0352     PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
0353     WakeupOnGFP=0x0800,
0354 };
0355 
0356 /* Bits in the TxMode register */
0357 enum tx_mode_bits {
0358     MiiSoftReset=0x8000, MIILoopback=0x4000,
0359     TxFlowEnable=0x0800, RxFlowEnable=0x0400,
0360     PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
0361 };
0362 
0363 /* Bits in the TxDescCtrl register. */
0364 enum tx_ctrl_bits {
0365     TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
0366     TxDescSpace128=0x30, TxDescSpace256=0x40,
0367     TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
0368     TxDescType3=0x03, TxDescType4=0x04,
0369     TxNoDMACompletion=0x08,
0370     TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
0371     TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
0372     TxDMABurstSizeShift=8,
0373 };
0374 
0375 /* Bits in the RxDescQCtrl register. */
0376 enum rx_ctrl_bits {
0377     RxBufferLenShift=16, RxMinDescrThreshShift=0,
0378     RxPrefetchMode=0x8000, RxVariableQ=0x2000,
0379     Rx2048QEntries=0x4000, Rx256QEntries=0,
0380     RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
0381     RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
0382     RxDescSpace4=0x000, RxDescSpace8=0x100,
0383     RxDescSpace16=0x200, RxDescSpace32=0x300,
0384     RxDescSpace64=0x400, RxDescSpace128=0x500,
0385     RxConsumerWrEn=0x80,
0386 };
0387 
0388 /* Bits in the RxDMACtrl register. */
0389 enum rx_dmactrl_bits {
0390     RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
0391     RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
0392     RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
0393     RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
0394     RxChecksumRejectTCPOnly=0x01000000,
0395     RxCompletionQ2Enable=0x800000,
0396     RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
0397     RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
0398     RxDMAQ2NonIP=0x400000,
0399     RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
0400     RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
0401     RxBurstSizeShift=0,
0402 };
0403 
0404 /* Bits in the RxCompletionAddr register */
0405 enum rx_compl_bits {
0406     RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
0407     RxComplProducerWrEn=0x40,
0408     RxComplType0=0x00, RxComplType1=0x10,
0409     RxComplType2=0x20, RxComplType3=0x30,
0410     RxComplThreshShift=0,
0411 };
0412 
0413 /* Bits in the TxCompletionAddr register */
0414 enum tx_compl_bits {
0415     TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
0416     TxComplProducerWrEn=0x40,
0417     TxComplIntrStatus=0x20,
0418     CommonQueueMode=0x10,
0419     TxComplThreshShift=0,
0420 };
0421 
0422 /* Bits in the GenCtrl register */
0423 enum gen_ctrl_bits {
0424     RxEnable=0x05, TxEnable=0x0a,
0425     RxGFPEnable=0x10, TxGFPEnable=0x20,
0426 };
0427 
0428 /* Bits in the IntrTimerCtrl register */
0429 enum intr_ctrl_bits {
0430     Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
0431     SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
0432     IntrLatencyMask=0x1f,
0433 };
0434 
0435 /* The Rx and Tx buffer descriptors. */
0436 struct starfire_rx_desc {
0437     netdrv_addr_t rxaddr;
0438 };
0439 enum rx_desc_bits {
0440     RxDescValid=1, RxDescEndRing=2,
0441 };
0442 
0443 /* Completion queue entry. */
0444 struct short_rx_done_desc {
0445     __le32 status;          /* Low 16 bits is length. */
0446 };
0447 struct basic_rx_done_desc {
0448     __le32 status;          /* Low 16 bits is length. */
0449     __le16 vlanid;
0450     __le16 status2;
0451 };
0452 struct csum_rx_done_desc {
0453     __le32 status;          /* Low 16 bits is length. */
0454     __le16 csum;            /* Partial checksum */
0455     __le16 status2;
0456 };
0457 struct full_rx_done_desc {
0458     __le32 status;          /* Low 16 bits is length. */
0459     __le16 status3;
0460     __le16 status2;
0461     __le16 vlanid;
0462     __le16 csum;            /* partial checksum */
0463     __le32 timestamp;
0464 };
0465 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
0466 #ifdef VLAN_SUPPORT
0467 typedef struct full_rx_done_desc rx_done_desc;
0468 #define RxComplType RxComplType3
0469 #else  /* not VLAN_SUPPORT */
0470 typedef struct csum_rx_done_desc rx_done_desc;
0471 #define RxComplType RxComplType2
0472 #endif /* not VLAN_SUPPORT */
0473 
0474 enum rx_done_bits {
0475     RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
0476 };
0477 
0478 /* Type 1 Tx descriptor. */
0479 struct starfire_tx_desc_1 {
0480     __le32 status;          /* Upper bits are status, lower 16 length. */
0481     __le32 addr;
0482 };
0483 
0484 /* Type 2 Tx descriptor. */
0485 struct starfire_tx_desc_2 {
0486     __le32 status;          /* Upper bits are status, lower 16 length. */
0487     __le32 reserved;
0488     __le64 addr;
0489 };
0490 
0491 #ifdef ADDR_64BITS
0492 typedef struct starfire_tx_desc_2 starfire_tx_desc;
0493 #define TX_DESC_TYPE TxDescType2
0494 #else  /* not ADDR_64BITS */
0495 typedef struct starfire_tx_desc_1 starfire_tx_desc;
0496 #define TX_DESC_TYPE TxDescType1
0497 #endif /* not ADDR_64BITS */
0498 #define TX_DESC_SPACING TxDescSpaceUnlim
0499 
0500 enum tx_desc_bits {
0501     TxDescID=0xB0000000,
0502     TxCRCEn=0x01000000, TxDescIntr=0x08000000,
0503     TxRingWrap=0x04000000, TxCalTCP=0x02000000,
0504 };
0505 struct tx_done_desc {
0506     __le32 status;          /* timestamp, index. */
0507 #if 0
0508     __le32 intrstatus;      /* interrupt status */
0509 #endif
0510 };
0511 
0512 struct rx_ring_info {
0513     struct sk_buff *skb;
0514     dma_addr_t mapping;
0515 };
0516 struct tx_ring_info {
0517     struct sk_buff *skb;
0518     dma_addr_t mapping;
0519     unsigned int used_slots;
0520 };
0521 
0522 #define PHY_CNT     2
0523 struct netdev_private {
0524     /* Descriptor rings first for alignment. */
0525     struct starfire_rx_desc *rx_ring;
0526     starfire_tx_desc *tx_ring;
0527     dma_addr_t rx_ring_dma;
0528     dma_addr_t tx_ring_dma;
0529     /* The addresses of rx/tx-in-place skbuffs. */
0530     struct rx_ring_info rx_info[RX_RING_SIZE];
0531     struct tx_ring_info tx_info[TX_RING_SIZE];
0532     /* Pointers to completion queues (full pages). */
0533     rx_done_desc *rx_done_q;
0534     dma_addr_t rx_done_q_dma;
0535     unsigned int rx_done;
0536     struct tx_done_desc *tx_done_q;
0537     dma_addr_t tx_done_q_dma;
0538     unsigned int tx_done;
0539     struct napi_struct napi;
0540     struct net_device *dev;
0541     struct pci_dev *pci_dev;
0542 #ifdef VLAN_SUPPORT
0543     unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
0544 #endif
0545     void *queue_mem;
0546     dma_addr_t queue_mem_dma;
0547     size_t queue_mem_size;
0548 
0549     /* Frequently used values: keep some adjacent for cache effect. */
0550     spinlock_t lock;
0551     unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
0552     unsigned int cur_tx, dirty_tx, reap_tx;
0553     unsigned int rx_buf_sz;     /* Based on MTU+slack. */
0554     /* These values keep track of the transceiver/media in use. */
0555     int speed100;           /* Set if speed == 100MBit. */
0556     u32 tx_mode;
0557     u32 intr_timer_ctrl;
0558     u8 tx_threshold;
0559     /* MII transceiver section. */
0560     struct mii_if_info mii_if;      /* MII lib hooks/info */
0561     int phy_cnt;            /* MII device addresses. */
0562     unsigned char phys[PHY_CNT];    /* MII device addresses. */
0563     void __iomem *base;
0564 };
0565 
0566 
0567 static int  mdio_read(struct net_device *dev, int phy_id, int location);
0568 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
0569 static int  netdev_open(struct net_device *dev);
0570 static void check_duplex(struct net_device *dev);
0571 static void tx_timeout(struct net_device *dev, unsigned int txqueue);
0572 static void init_ring(struct net_device *dev);
0573 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
0574 static irqreturn_t intr_handler(int irq, void *dev_instance);
0575 static void netdev_error(struct net_device *dev, int intr_status);
0576 static int  __netdev_rx(struct net_device *dev, int *quota);
0577 static int  netdev_poll(struct napi_struct *napi, int budget);
0578 static void refill_rx_ring(struct net_device *dev);
0579 static void netdev_error(struct net_device *dev, int intr_status);
0580 static void set_rx_mode(struct net_device *dev);
0581 static struct net_device_stats *get_stats(struct net_device *dev);
0582 static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
0583 static int  netdev_close(struct net_device *dev);
0584 static void netdev_media_change(struct net_device *dev);
0585 static const struct ethtool_ops ethtool_ops;
0586 
0587 
0588 #ifdef VLAN_SUPPORT
0589 static int netdev_vlan_rx_add_vid(struct net_device *dev,
0590                   __be16 proto, u16 vid)
0591 {
0592     struct netdev_private *np = netdev_priv(dev);
0593 
0594     spin_lock(&np->lock);
0595     if (debug > 1)
0596         printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
0597     set_bit(vid, np->active_vlans);
0598     set_rx_mode(dev);
0599     spin_unlock(&np->lock);
0600 
0601     return 0;
0602 }
0603 
0604 static int netdev_vlan_rx_kill_vid(struct net_device *dev,
0605                    __be16 proto, u16 vid)
0606 {
0607     struct netdev_private *np = netdev_priv(dev);
0608 
0609     spin_lock(&np->lock);
0610     if (debug > 1)
0611         printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
0612     clear_bit(vid, np->active_vlans);
0613     set_rx_mode(dev);
0614     spin_unlock(&np->lock);
0615 
0616     return 0;
0617 }
0618 #endif /* VLAN_SUPPORT */
0619 
0620 
0621 static const struct net_device_ops netdev_ops = {
0622     .ndo_open       = netdev_open,
0623     .ndo_stop       = netdev_close,
0624     .ndo_start_xmit     = start_tx,
0625     .ndo_tx_timeout     = tx_timeout,
0626     .ndo_get_stats      = get_stats,
0627     .ndo_set_rx_mode    = set_rx_mode,
0628     .ndo_eth_ioctl      = netdev_ioctl,
0629     .ndo_set_mac_address    = eth_mac_addr,
0630     .ndo_validate_addr  = eth_validate_addr,
0631 #ifdef VLAN_SUPPORT
0632     .ndo_vlan_rx_add_vid    = netdev_vlan_rx_add_vid,
0633     .ndo_vlan_rx_kill_vid   = netdev_vlan_rx_kill_vid,
0634 #endif
0635 };
0636 
0637 static int starfire_init_one(struct pci_dev *pdev,
0638                  const struct pci_device_id *ent)
0639 {
0640     struct device *d = &pdev->dev;
0641     struct netdev_private *np;
0642     int i, irq, chip_idx = ent->driver_data;
0643     struct net_device *dev;
0644     u8 addr[ETH_ALEN];
0645     long ioaddr;
0646     void __iomem *base;
0647     int drv_flags, io_size;
0648     int boguscnt;
0649 
0650     if (pci_enable_device (pdev))
0651         return -EIO;
0652 
0653     ioaddr = pci_resource_start(pdev, 0);
0654     io_size = pci_resource_len(pdev, 0);
0655     if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
0656         dev_err(d, "no PCI MEM resources, aborting\n");
0657         return -ENODEV;
0658     }
0659 
0660     dev = alloc_etherdev(sizeof(*np));
0661     if (!dev)
0662         return -ENOMEM;
0663 
0664     SET_NETDEV_DEV(dev, &pdev->dev);
0665 
0666     irq = pdev->irq;
0667 
0668     if (pci_request_regions (pdev, DRV_NAME)) {
0669         dev_err(d, "cannot reserve PCI resources, aborting\n");
0670         goto err_out_free_netdev;
0671     }
0672 
0673     base = ioremap(ioaddr, io_size);
0674     if (!base) {
0675         dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
0676             io_size, ioaddr);
0677         goto err_out_free_res;
0678     }
0679 
0680     pci_set_master(pdev);
0681 
0682     /* enable MWI -- it vastly improves Rx performance on sparc64 */
0683     pci_try_set_mwi(pdev);
0684 
0685 #ifdef ZEROCOPY
0686     /* Starfire can do TCP/UDP checksumming */
0687     if (enable_hw_cksum)
0688         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
0689 #endif /* ZEROCOPY */
0690 
0691 #ifdef VLAN_SUPPORT
0692     dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
0693 #endif /* VLAN_RX_KILL_VID */
0694 #ifdef ADDR_64BITS
0695     dev->features |= NETIF_F_HIGHDMA;
0696 #endif /* ADDR_64BITS */
0697 
0698     /* Serial EEPROM reads are hidden by the hardware. */
0699     for (i = 0; i < 6; i++)
0700         addr[i] = readb(base + EEPROMCtrl + 20 - i);
0701     eth_hw_addr_set(dev, addr);
0702 
0703 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
0704     if (debug > 4)
0705         for (i = 0; i < 0x20; i++)
0706             printk("%2.2x%s",
0707                    (unsigned int)readb(base + EEPROMCtrl + i),
0708                    i % 16 != 15 ? " " : "\n");
0709 #endif
0710 
0711     /* Issue soft reset */
0712     writel(MiiSoftReset, base + TxMode);
0713     udelay(1000);
0714     writel(0, base + TxMode);
0715 
0716     /* Reset the chip to erase previous misconfiguration. */
0717     writel(1, base + PCIDeviceConfig);
0718     boguscnt = 1000;
0719     while (--boguscnt > 0) {
0720         udelay(10);
0721         if ((readl(base + PCIDeviceConfig) & 1) == 0)
0722             break;
0723     }
0724     if (boguscnt == 0)
0725         printk("%s: chipset reset never completed!\n", dev->name);
0726     /* wait a little longer */
0727     udelay(1000);
0728 
0729     np = netdev_priv(dev);
0730     np->dev = dev;
0731     np->base = base;
0732     spin_lock_init(&np->lock);
0733     pci_set_drvdata(pdev, dev);
0734 
0735     np->pci_dev = pdev;
0736 
0737     np->mii_if.dev = dev;
0738     np->mii_if.mdio_read = mdio_read;
0739     np->mii_if.mdio_write = mdio_write;
0740     np->mii_if.phy_id_mask = 0x1f;
0741     np->mii_if.reg_num_mask = 0x1f;
0742 
0743     drv_flags = netdrv_tbl[chip_idx].drv_flags;
0744 
0745     np->speed100 = 1;
0746 
0747     /* timer resolution is 128 * 0.8us */
0748     np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
0749         Timer10X | EnableIntrMasking;
0750 
0751     if (small_frames > 0) {
0752         np->intr_timer_ctrl |= SmallFrameBypass;
0753         switch (small_frames) {
0754         case 1 ... 64:
0755             np->intr_timer_ctrl |= SmallFrame64;
0756             break;
0757         case 65 ... 128:
0758             np->intr_timer_ctrl |= SmallFrame128;
0759             break;
0760         case 129 ... 256:
0761             np->intr_timer_ctrl |= SmallFrame256;
0762             break;
0763         default:
0764             np->intr_timer_ctrl |= SmallFrame512;
0765             if (small_frames > 512)
0766                 printk("Adjusting small_frames down to 512\n");
0767             break;
0768         }
0769     }
0770 
0771     dev->netdev_ops = &netdev_ops;
0772     dev->watchdog_timeo = TX_TIMEOUT;
0773     dev->ethtool_ops = &ethtool_ops;
0774 
0775     netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
0776 
0777     if (mtu)
0778         dev->mtu = mtu;
0779 
0780     if (register_netdev(dev))
0781         goto err_out_cleardev;
0782 
0783     printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
0784            dev->name, netdrv_tbl[chip_idx].name, base,
0785            dev->dev_addr, irq);
0786 
0787     if (drv_flags & CanHaveMII) {
0788         int phy, phy_idx = 0;
0789         int mii_status;
0790         for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
0791             mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
0792             msleep(100);
0793             boguscnt = 1000;
0794             while (--boguscnt > 0)
0795                 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
0796                     break;
0797             if (boguscnt == 0) {
0798                 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
0799                 continue;
0800             }
0801             mii_status = mdio_read(dev, phy, MII_BMSR);
0802             if (mii_status != 0) {
0803                 np->phys[phy_idx++] = phy;
0804                 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
0805                 printk(KERN_INFO "%s: MII PHY found at address %d, status "
0806                        "%#4.4x advertising %#4.4x.\n",
0807                        dev->name, phy, mii_status, np->mii_if.advertising);
0808                 /* there can be only one PHY on-board */
0809                 break;
0810             }
0811         }
0812         np->phy_cnt = phy_idx;
0813         if (np->phy_cnt > 0)
0814             np->mii_if.phy_id = np->phys[0];
0815         else
0816             memset(&np->mii_if, 0, sizeof(np->mii_if));
0817     }
0818 
0819     printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
0820            dev->name, enable_hw_cksum ? "enabled" : "disabled");
0821     return 0;
0822 
0823 err_out_cleardev:
0824     iounmap(base);
0825 err_out_free_res:
0826     pci_release_regions (pdev);
0827 err_out_free_netdev:
0828     free_netdev(dev);
0829     return -ENODEV;
0830 }
0831 
0832 
0833 /* Read the MII Management Data I/O (MDIO) interfaces. */
0834 static int mdio_read(struct net_device *dev, int phy_id, int location)
0835 {
0836     struct netdev_private *np = netdev_priv(dev);
0837     void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
0838     int result, boguscnt=1000;
0839     /* ??? Should we add a busy-wait here? */
0840     do {
0841         result = readl(mdio_addr);
0842     } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
0843     if (boguscnt == 0)
0844         return 0;
0845     if ((result & 0xffff) == 0xffff)
0846         return 0;
0847     return result & 0xffff;
0848 }
0849 
0850 
0851 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
0852 {
0853     struct netdev_private *np = netdev_priv(dev);
0854     void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
0855     writel(value, mdio_addr);
0856     /* The busy-wait will occur before a read. */
0857 }
0858 
0859 
0860 static int netdev_open(struct net_device *dev)
0861 {
0862     const struct firmware *fw_rx, *fw_tx;
0863     const __be32 *fw_rx_data, *fw_tx_data;
0864     struct netdev_private *np = netdev_priv(dev);
0865     void __iomem *ioaddr = np->base;
0866     const int irq = np->pci_dev->irq;
0867     int i, retval;
0868     size_t tx_size, rx_size;
0869     size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
0870 
0871     /* Do we ever need to reset the chip??? */
0872 
0873     retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
0874     if (retval)
0875         return retval;
0876 
0877     /* Disable the Rx and Tx, and reset the chip. */
0878     writel(0, ioaddr + GenCtrl);
0879     writel(1, ioaddr + PCIDeviceConfig);
0880     if (debug > 1)
0881         printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
0882                dev->name, irq);
0883 
0884     /* Allocate the various queues. */
0885     if (!np->queue_mem) {
0886         tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
0887         rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
0888         tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
0889         rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
0890         np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
0891         np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
0892                            np->queue_mem_size,
0893                            &np->queue_mem_dma, GFP_ATOMIC);
0894         if (np->queue_mem == NULL) {
0895             free_irq(irq, dev);
0896             return -ENOMEM;
0897         }
0898 
0899         np->tx_done_q     = np->queue_mem;
0900         np->tx_done_q_dma = np->queue_mem_dma;
0901         np->rx_done_q     = (void *) np->tx_done_q + tx_done_q_size;
0902         np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
0903         np->tx_ring       = (void *) np->rx_done_q + rx_done_q_size;
0904         np->tx_ring_dma   = np->rx_done_q_dma + rx_done_q_size;
0905         np->rx_ring       = (void *) np->tx_ring + tx_ring_size;
0906         np->rx_ring_dma   = np->tx_ring_dma + tx_ring_size;
0907     }
0908 
0909     /* Start with no carrier, it gets adjusted later */
0910     netif_carrier_off(dev);
0911     init_ring(dev);
0912     /* Set the size of the Rx buffers. */
0913     writel((np->rx_buf_sz << RxBufferLenShift) |
0914            (0 << RxMinDescrThreshShift) |
0915            RxPrefetchMode | RxVariableQ |
0916            RX_Q_ENTRIES |
0917            RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
0918            RxDescSpace4,
0919            ioaddr + RxDescQCtrl);
0920 
0921     /* Set up the Rx DMA controller. */
0922     writel(RxChecksumIgnore |
0923            (0 << RxEarlyIntThreshShift) |
0924            (6 << RxHighPrioThreshShift) |
0925            ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
0926            ioaddr + RxDMACtrl);
0927 
0928     /* Set Tx descriptor */
0929     writel((2 << TxHiPriFIFOThreshShift) |
0930            (0 << TxPadLenShift) |
0931            ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
0932            TX_DESC_Q_ADDR_SIZE |
0933            TX_DESC_SPACING | TX_DESC_TYPE,
0934            ioaddr + TxDescCtrl);
0935 
0936     writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
0937     writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
0938     writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
0939     writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
0940     writel(np->tx_ring_dma, ioaddr + TxRingPtr);
0941 
0942     writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
0943     writel(np->rx_done_q_dma |
0944            RxComplType |
0945            (0 << RxComplThreshShift),
0946            ioaddr + RxCompletionAddr);
0947 
0948     if (debug > 1)
0949         printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
0950 
0951     /* Fill both the Tx SA register and the Rx perfect filter. */
0952     for (i = 0; i < 6; i++)
0953         writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
0954     /* The first entry is special because it bypasses the VLAN filter.
0955        Don't use it. */
0956     writew(0, ioaddr + PerfFilterTable);
0957     writew(0, ioaddr + PerfFilterTable + 4);
0958     writew(0, ioaddr + PerfFilterTable + 8);
0959     for (i = 1; i < 16; i++) {
0960         const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
0961         void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
0962         writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
0963         writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
0964         writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
0965     }
0966 
0967     /* Initialize other registers. */
0968     /* Configure the PCI bus bursts and FIFO thresholds. */
0969     np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;  /* modified when link is up. */
0970     writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
0971     udelay(1000);
0972     writel(np->tx_mode, ioaddr + TxMode);
0973     np->tx_threshold = 4;
0974     writel(np->tx_threshold, ioaddr + TxThreshold);
0975 
0976     writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
0977 
0978     napi_enable(&np->napi);
0979 
0980     netif_start_queue(dev);
0981 
0982     if (debug > 1)
0983         printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
0984     set_rx_mode(dev);
0985 
0986     np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
0987     check_duplex(dev);
0988 
0989     /* Enable GPIO interrupts on link change */
0990     writel(0x0f00ff00, ioaddr + GPIOCtrl);
0991 
0992     /* Set the interrupt mask */
0993     writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
0994            IntrTxDMADone | IntrStatsMax | IntrLinkChange |
0995            IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
0996            ioaddr + IntrEnable);
0997     /* Enable PCI interrupts. */
0998     writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
0999            ioaddr + PCIDeviceConfig);
1000 
1001 #ifdef VLAN_SUPPORT
1002     /* Set VLAN type to 802.1q */
1003     writel(ETH_P_8021Q, ioaddr + VlanType);
1004 #endif /* VLAN_SUPPORT */
1005 
1006     retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1007     if (retval) {
1008         printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1009                FIRMWARE_RX);
1010         goto out_init;
1011     }
1012     if (fw_rx->size % 4) {
1013         printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1014                fw_rx->size, FIRMWARE_RX);
1015         retval = -EINVAL;
1016         goto out_rx;
1017     }
1018     retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1019     if (retval) {
1020         printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1021                FIRMWARE_TX);
1022         goto out_rx;
1023     }
1024     if (fw_tx->size % 4) {
1025         printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1026                fw_tx->size, FIRMWARE_TX);
1027         retval = -EINVAL;
1028         goto out_tx;
1029     }
1030     fw_rx_data = (const __be32 *)&fw_rx->data[0];
1031     fw_tx_data = (const __be32 *)&fw_tx->data[0];
1032     rx_size = fw_rx->size / 4;
1033     tx_size = fw_tx->size / 4;
1034 
1035     /* Load Rx/Tx firmware into the frame processors */
1036     for (i = 0; i < rx_size; i++)
1037         writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1038     for (i = 0; i < tx_size; i++)
1039         writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1040     if (enable_hw_cksum)
1041         /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1042         writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1043     else
1044         /* Enable the Rx and Tx units only. */
1045         writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1046 
1047     if (debug > 1)
1048         printk(KERN_DEBUG "%s: Done netdev_open().\n",
1049                dev->name);
1050 
1051 out_tx:
1052     release_firmware(fw_tx);
1053 out_rx:
1054     release_firmware(fw_rx);
1055 out_init:
1056     if (retval)
1057         netdev_close(dev);
1058     return retval;
1059 }
1060 
1061 
1062 static void check_duplex(struct net_device *dev)
1063 {
1064     struct netdev_private *np = netdev_priv(dev);
1065     u16 reg0;
1066     int silly_count = 1000;
1067 
1068     mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1069     mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1070     udelay(500);
1071     while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1072         /* do nothing */;
1073     if (!silly_count) {
1074         printk("%s: MII reset failed!\n", dev->name);
1075         return;
1076     }
1077 
1078     reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1079 
1080     if (!np->mii_if.force_media) {
1081         reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1082     } else {
1083         reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1084         if (np->speed100)
1085             reg0 |= BMCR_SPEED100;
1086         if (np->mii_if.full_duplex)
1087             reg0 |= BMCR_FULLDPLX;
1088         printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1089                dev->name,
1090                np->speed100 ? "100" : "10",
1091                np->mii_if.full_duplex ? "full" : "half");
1092     }
1093     mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1094 }
1095 
1096 
1097 static void tx_timeout(struct net_device *dev, unsigned int txqueue)
1098 {
1099     struct netdev_private *np = netdev_priv(dev);
1100     void __iomem *ioaddr = np->base;
1101     int old_debug;
1102 
1103     printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1104            "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1105 
1106     /* Perhaps we should reinitialize the hardware here. */
1107 
1108     /*
1109      * Stop and restart the interface.
1110      * Cheat and increase the debug level temporarily.
1111      */
1112     old_debug = debug;
1113     debug = 2;
1114     netdev_close(dev);
1115     netdev_open(dev);
1116     debug = old_debug;
1117 
1118     /* Trigger an immediate transmit demand. */
1119 
1120     netif_trans_update(dev); /* prevent tx timeout */
1121     dev->stats.tx_errors++;
1122     netif_wake_queue(dev);
1123 }
1124 
1125 
1126 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1127 static void init_ring(struct net_device *dev)
1128 {
1129     struct netdev_private *np = netdev_priv(dev);
1130     int i;
1131 
1132     np->cur_rx = np->cur_tx = np->reap_tx = 0;
1133     np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1134 
1135     np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1136 
1137     /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1138     for (i = 0; i < RX_RING_SIZE; i++) {
1139         struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1140         np->rx_info[i].skb = skb;
1141         if (skb == NULL)
1142             break;
1143         np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
1144                             skb->data,
1145                             np->rx_buf_sz,
1146                             DMA_FROM_DEVICE);
1147         if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
1148             dev_kfree_skb(skb);
1149             np->rx_info[i].skb = NULL;
1150             break;
1151         }
1152         /* Grrr, we cannot offset to correctly align the IP header. */
1153         np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1154     }
1155     writew(i - 1, np->base + RxDescQIdx);
1156     np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1157 
1158     /* Clear the remainder of the Rx buffer ring. */
1159     for (  ; i < RX_RING_SIZE; i++) {
1160         np->rx_ring[i].rxaddr = 0;
1161         np->rx_info[i].skb = NULL;
1162         np->rx_info[i].mapping = 0;
1163     }
1164     /* Mark the last entry as wrapping the ring. */
1165     np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1166 
1167     /* Clear the completion rings. */
1168     for (i = 0; i < DONE_Q_SIZE; i++) {
1169         np->rx_done_q[i].status = 0;
1170         np->tx_done_q[i].status = 0;
1171     }
1172 
1173     for (i = 0; i < TX_RING_SIZE; i++)
1174         memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1175 }
1176 
1177 
1178 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1179 {
1180     struct netdev_private *np = netdev_priv(dev);
1181     unsigned int entry;
1182     unsigned int prev_tx;
1183     u32 status;
1184     int i, j;
1185 
1186     /*
1187      * be cautious here, wrapping the queue has weird semantics
1188      * and we may not have enough slots even when it seems we do.
1189      */
1190     if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1191         netif_stop_queue(dev);
1192         return NETDEV_TX_BUSY;
1193     }
1194 
1195 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1196     if (skb->ip_summed == CHECKSUM_PARTIAL) {
1197         if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1198             return NETDEV_TX_OK;
1199     }
1200 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1201 
1202     prev_tx = np->cur_tx;
1203     entry = np->cur_tx % TX_RING_SIZE;
1204     for (i = 0; i < skb_num_frags(skb); i++) {
1205         int wrap_ring = 0;
1206         status = TxDescID;
1207 
1208         if (i == 0) {
1209             np->tx_info[entry].skb = skb;
1210             status |= TxCRCEn;
1211             if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1212                 status |= TxRingWrap;
1213                 wrap_ring = 1;
1214             }
1215             if (np->reap_tx) {
1216                 status |= TxDescIntr;
1217                 np->reap_tx = 0;
1218             }
1219             if (skb->ip_summed == CHECKSUM_PARTIAL) {
1220                 status |= TxCalTCP;
1221                 dev->stats.tx_compressed++;
1222             }
1223             status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1224 
1225             np->tx_info[entry].mapping =
1226                 dma_map_single(&np->pci_dev->dev, skb->data,
1227                            skb_first_frag_len(skb),
1228                            DMA_TO_DEVICE);
1229         } else {
1230             const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1231             status |= skb_frag_size(this_frag);
1232             np->tx_info[entry].mapping =
1233                 dma_map_single(&np->pci_dev->dev,
1234                            skb_frag_address(this_frag),
1235                            skb_frag_size(this_frag),
1236                            DMA_TO_DEVICE);
1237         }
1238         if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
1239             dev->stats.tx_dropped++;
1240             goto err_out;
1241         }
1242 
1243         np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1244         np->tx_ring[entry].status = cpu_to_le32(status);
1245         if (debug > 3)
1246             printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1247                    dev->name, np->cur_tx, np->dirty_tx,
1248                    entry, status);
1249         if (wrap_ring) {
1250             np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1251             np->cur_tx += np->tx_info[entry].used_slots;
1252             entry = 0;
1253         } else {
1254             np->tx_info[entry].used_slots = 1;
1255             np->cur_tx += np->tx_info[entry].used_slots;
1256             entry++;
1257         }
1258         /* scavenge the tx descriptors twice per TX_RING_SIZE */
1259         if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1260             np->reap_tx = 1;
1261     }
1262 
1263     /* Non-x86: explicitly flush descriptor cache lines here. */
1264     /* Ensure all descriptors are written back before the transmit is
1265        initiated. - Jes */
1266     wmb();
1267 
1268     /* Update the producer index. */
1269     writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1270 
1271     /* 4 is arbitrary, but should be ok */
1272     if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1273         netif_stop_queue(dev);
1274 
1275     return NETDEV_TX_OK;
1276 
1277 err_out:
1278     entry = prev_tx % TX_RING_SIZE;
1279     np->tx_info[entry].skb = NULL;
1280     if (i > 0) {
1281         dma_unmap_single(&np->pci_dev->dev,
1282                  np->tx_info[entry].mapping,
1283                  skb_first_frag_len(skb), DMA_TO_DEVICE);
1284         np->tx_info[entry].mapping = 0;
1285         entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1286         for (j = 1; j < i; j++) {
1287             dma_unmap_single(&np->pci_dev->dev,
1288                      np->tx_info[entry].mapping,
1289                      skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
1290                      DMA_TO_DEVICE);
1291             entry++;
1292         }
1293     }
1294     dev_kfree_skb_any(skb);
1295     np->cur_tx = prev_tx;
1296     return NETDEV_TX_OK;
1297 }
1298 
1299 /* The interrupt handler does all of the Rx thread work and cleans up
1300    after the Tx thread. */
1301 static irqreturn_t intr_handler(int irq, void *dev_instance)
1302 {
1303     struct net_device *dev = dev_instance;
1304     struct netdev_private *np = netdev_priv(dev);
1305     void __iomem *ioaddr = np->base;
1306     int boguscnt = max_interrupt_work;
1307     int consumer;
1308     int tx_status;
1309     int handled = 0;
1310 
1311     do {
1312         u32 intr_status = readl(ioaddr + IntrClear);
1313 
1314         if (debug > 4)
1315             printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1316                    dev->name, intr_status);
1317 
1318         if (intr_status == 0 || intr_status == (u32) -1)
1319             break;
1320 
1321         handled = 1;
1322 
1323         if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1324             u32 enable;
1325 
1326             if (likely(napi_schedule_prep(&np->napi))) {
1327                 __napi_schedule(&np->napi);
1328                 enable = readl(ioaddr + IntrEnable);
1329                 enable &= ~(IntrRxDone | IntrRxEmpty);
1330                 writel(enable, ioaddr + IntrEnable);
1331                 /* flush PCI posting buffers */
1332                 readl(ioaddr + IntrEnable);
1333             } else {
1334                 /* Paranoia check */
1335                 enable = readl(ioaddr + IntrEnable);
1336                 if (enable & (IntrRxDone | IntrRxEmpty)) {
1337                     printk(KERN_INFO
1338                            "%s: interrupt while in poll!\n",
1339                            dev->name);
1340                     enable &= ~(IntrRxDone | IntrRxEmpty);
1341                     writel(enable, ioaddr + IntrEnable);
1342                 }
1343             }
1344         }
1345 
1346         /* Scavenge the skbuff list based on the Tx-done queue.
1347            There are redundant checks here that may be cleaned up
1348            after the driver has proven to be reliable. */
1349         consumer = readl(ioaddr + TxConsumerIdx);
1350         if (debug > 3)
1351             printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1352                    dev->name, consumer);
1353 
1354         while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1355             if (debug > 3)
1356                 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1357                        dev->name, np->dirty_tx, np->tx_done, tx_status);
1358             if ((tx_status & 0xe0000000) == 0xa0000000) {
1359                 dev->stats.tx_packets++;
1360             } else if ((tx_status & 0xe0000000) == 0x80000000) {
1361                 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1362                 struct sk_buff *skb = np->tx_info[entry].skb;
1363                 np->tx_info[entry].skb = NULL;
1364                 dma_unmap_single(&np->pci_dev->dev,
1365                          np->tx_info[entry].mapping,
1366                          skb_first_frag_len(skb),
1367                          DMA_TO_DEVICE);
1368                 np->tx_info[entry].mapping = 0;
1369                 np->dirty_tx += np->tx_info[entry].used_slots;
1370                 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1371                 {
1372                     int i;
1373                     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1374                         dma_unmap_single(&np->pci_dev->dev,
1375                                  np->tx_info[entry].mapping,
1376                                  skb_frag_size(&skb_shinfo(skb)->frags[i]),
1377                                  DMA_TO_DEVICE);
1378                         np->dirty_tx++;
1379                         entry++;
1380                     }
1381                 }
1382 
1383                 dev_consume_skb_irq(skb);
1384             }
1385             np->tx_done_q[np->tx_done].status = 0;
1386             np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1387         }
1388         writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1389 
1390         if (netif_queue_stopped(dev) &&
1391             (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1392             /* The ring is no longer full, wake the queue. */
1393             netif_wake_queue(dev);
1394         }
1395 
1396         /* Stats overflow */
1397         if (intr_status & IntrStatsMax)
1398             get_stats(dev);
1399 
1400         /* Media change interrupt. */
1401         if (intr_status & IntrLinkChange)
1402             netdev_media_change(dev);
1403 
1404         /* Abnormal error summary/uncommon events handlers. */
1405         if (intr_status & IntrAbnormalSummary)
1406             netdev_error(dev, intr_status);
1407 
1408         if (--boguscnt < 0) {
1409             if (debug > 1)
1410                 printk(KERN_WARNING "%s: Too much work at interrupt, "
1411                        "status=%#8.8x.\n",
1412                        dev->name, intr_status);
1413             break;
1414         }
1415     } while (1);
1416 
1417     if (debug > 4)
1418         printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1419                dev->name, (int) readl(ioaddr + IntrStatus));
1420     return IRQ_RETVAL(handled);
1421 }
1422 
1423 
1424 /*
1425  * This routine is logically part of the interrupt/poll handler, but separated
1426  * for clarity and better register allocation.
1427  */
1428 static int __netdev_rx(struct net_device *dev, int *quota)
1429 {
1430     struct netdev_private *np = netdev_priv(dev);
1431     u32 desc_status;
1432     int retcode = 0;
1433 
1434     /* If EOP is set on the next entry, it's a new packet. Send it up. */
1435     while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1436         struct sk_buff *skb;
1437         u16 pkt_len;
1438         int entry;
1439         rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1440 
1441         if (debug > 4)
1442             printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1443         if (!(desc_status & RxOK)) {
1444             /* There was an error. */
1445             if (debug > 2)
1446                 printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1447             dev->stats.rx_errors++;
1448             if (desc_status & RxFIFOErr)
1449                 dev->stats.rx_fifo_errors++;
1450             goto next_rx;
1451         }
1452 
1453         if (*quota <= 0) {  /* out of rx quota */
1454             retcode = 1;
1455             goto out;
1456         }
1457         (*quota)--;
1458 
1459         pkt_len = desc_status;  /* Implicitly Truncate */
1460         entry = (desc_status >> 16) & 0x7ff;
1461 
1462         if (debug > 4)
1463             printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1464         /* Check if the packet is long enough to accept without copying
1465            to a minimally-sized skbuff. */
1466         if (pkt_len < rx_copybreak &&
1467             (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1468             skb_reserve(skb, 2);    /* 16 byte align the IP header */
1469             dma_sync_single_for_cpu(&np->pci_dev->dev,
1470                         np->rx_info[entry].mapping,
1471                         pkt_len, DMA_FROM_DEVICE);
1472             skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1473             dma_sync_single_for_device(&np->pci_dev->dev,
1474                            np->rx_info[entry].mapping,
1475                            pkt_len, DMA_FROM_DEVICE);
1476             skb_put(skb, pkt_len);
1477         } else {
1478             dma_unmap_single(&np->pci_dev->dev,
1479                      np->rx_info[entry].mapping,
1480                      np->rx_buf_sz, DMA_FROM_DEVICE);
1481             skb = np->rx_info[entry].skb;
1482             skb_put(skb, pkt_len);
1483             np->rx_info[entry].skb = NULL;
1484             np->rx_info[entry].mapping = 0;
1485         }
1486 #ifndef final_version           /* Remove after testing. */
1487         /* You will want this info for the initial debug. */
1488         if (debug > 5) {
1489             printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
1490                    skb->data, skb->data + 6,
1491                    skb->data[12], skb->data[13]);
1492         }
1493 #endif
1494 
1495         skb->protocol = eth_type_trans(skb, dev);
1496 #ifdef VLAN_SUPPORT
1497         if (debug > 4)
1498             printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1499 #endif
1500         if (le16_to_cpu(desc->status2) & 0x0100) {
1501             skb->ip_summed = CHECKSUM_UNNECESSARY;
1502             dev->stats.rx_compressed++;
1503         }
1504         /*
1505          * This feature doesn't seem to be working, at least
1506          * with the two firmware versions I have. If the GFP sees
1507          * an IP fragment, it either ignores it completely, or reports
1508          * "bad checksum" on it.
1509          *
1510          * Maybe I missed something -- corrections are welcome.
1511          * Until then, the printk stays. :-) -Ion
1512          */
1513         else if (le16_to_cpu(desc->status2) & 0x0040) {
1514             skb->ip_summed = CHECKSUM_COMPLETE;
1515             skb->csum = le16_to_cpu(desc->csum);
1516             printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1517         }
1518 #ifdef VLAN_SUPPORT
1519         if (le16_to_cpu(desc->status2) & 0x0200) {
1520             u16 vlid = le16_to_cpu(desc->vlanid);
1521 
1522             if (debug > 4) {
1523                 printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
1524                        vlid);
1525             }
1526             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1527         }
1528 #endif /* VLAN_SUPPORT */
1529         netif_receive_skb(skb);
1530         dev->stats.rx_packets++;
1531 
1532     next_rx:
1533         np->cur_rx++;
1534         desc->status = 0;
1535         np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1536     }
1537 
1538     if (*quota == 0) {  /* out of rx quota */
1539         retcode = 1;
1540         goto out;
1541     }
1542     writew(np->rx_done, np->base + CompletionQConsumerIdx);
1543 
1544  out:
1545     refill_rx_ring(dev);
1546     if (debug > 5)
1547         printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1548                retcode, np->rx_done, desc_status);
1549     return retcode;
1550 }
1551 
1552 static int netdev_poll(struct napi_struct *napi, int budget)
1553 {
1554     struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1555     struct net_device *dev = np->dev;
1556     u32 intr_status;
1557     void __iomem *ioaddr = np->base;
1558     int quota = budget;
1559 
1560     do {
1561         writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1562 
1563         if (__netdev_rx(dev, &quota))
1564             goto out;
1565 
1566         intr_status = readl(ioaddr + IntrStatus);
1567     } while (intr_status & (IntrRxDone | IntrRxEmpty));
1568 
1569     napi_complete(napi);
1570     intr_status = readl(ioaddr + IntrEnable);
1571     intr_status |= IntrRxDone | IntrRxEmpty;
1572     writel(intr_status, ioaddr + IntrEnable);
1573 
1574  out:
1575     if (debug > 5)
1576         printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
1577                budget - quota);
1578 
1579     /* Restart Rx engine if stopped. */
1580     return budget - quota;
1581 }
1582 
1583 static void refill_rx_ring(struct net_device *dev)
1584 {
1585     struct netdev_private *np = netdev_priv(dev);
1586     struct sk_buff *skb;
1587     int entry = -1;
1588 
1589     /* Refill the Rx ring buffers. */
1590     for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1591         entry = np->dirty_rx % RX_RING_SIZE;
1592         if (np->rx_info[entry].skb == NULL) {
1593             skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1594             np->rx_info[entry].skb = skb;
1595             if (skb == NULL)
1596                 break;  /* Better luck next round. */
1597             np->rx_info[entry].mapping =
1598                 dma_map_single(&np->pci_dev->dev, skb->data,
1599                            np->rx_buf_sz, DMA_FROM_DEVICE);
1600             if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1601                 dev_kfree_skb(skb);
1602                 np->rx_info[entry].skb = NULL;
1603                 break;
1604             }
1605             np->rx_ring[entry].rxaddr =
1606                 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1607         }
1608         if (entry == RX_RING_SIZE - 1)
1609             np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1610     }
1611     if (entry >= 0)
1612         writew(entry, np->base + RxDescQIdx);
1613 }
1614 
1615 
1616 static void netdev_media_change(struct net_device *dev)
1617 {
1618     struct netdev_private *np = netdev_priv(dev);
1619     void __iomem *ioaddr = np->base;
1620     u16 reg0, reg1, reg4, reg5;
1621     u32 new_tx_mode;
1622     u32 new_intr_timer_ctrl;
1623 
1624     /* reset status first */
1625     mdio_read(dev, np->phys[0], MII_BMCR);
1626     mdio_read(dev, np->phys[0], MII_BMSR);
1627 
1628     reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1629     reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1630 
1631     if (reg1 & BMSR_LSTATUS) {
1632         /* link is up */
1633         if (reg0 & BMCR_ANENABLE) {
1634             /* autonegotiation is enabled */
1635             reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1636             reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1637             if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1638                 np->speed100 = 1;
1639                 np->mii_if.full_duplex = 1;
1640             } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1641                 np->speed100 = 1;
1642                 np->mii_if.full_duplex = 0;
1643             } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1644                 np->speed100 = 0;
1645                 np->mii_if.full_duplex = 1;
1646             } else {
1647                 np->speed100 = 0;
1648                 np->mii_if.full_duplex = 0;
1649             }
1650         } else {
1651             /* autonegotiation is disabled */
1652             if (reg0 & BMCR_SPEED100)
1653                 np->speed100 = 1;
1654             else
1655                 np->speed100 = 0;
1656             if (reg0 & BMCR_FULLDPLX)
1657                 np->mii_if.full_duplex = 1;
1658             else
1659                 np->mii_if.full_duplex = 0;
1660         }
1661         netif_carrier_on(dev);
1662         printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1663                dev->name,
1664                np->speed100 ? "100" : "10",
1665                np->mii_if.full_duplex ? "full" : "half");
1666 
1667         new_tx_mode = np->tx_mode & ~FullDuplex;    /* duplex setting */
1668         if (np->mii_if.full_duplex)
1669             new_tx_mode |= FullDuplex;
1670         if (np->tx_mode != new_tx_mode) {
1671             np->tx_mode = new_tx_mode;
1672             writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1673             udelay(1000);
1674             writel(np->tx_mode, ioaddr + TxMode);
1675         }
1676 
1677         new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1678         if (np->speed100)
1679             new_intr_timer_ctrl |= Timer10X;
1680         if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1681             np->intr_timer_ctrl = new_intr_timer_ctrl;
1682             writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1683         }
1684     } else {
1685         netif_carrier_off(dev);
1686         printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1687     }
1688 }
1689 
1690 
1691 static void netdev_error(struct net_device *dev, int intr_status)
1692 {
1693     struct netdev_private *np = netdev_priv(dev);
1694 
1695     /* Came close to underrunning the Tx FIFO, increase threshold. */
1696     if (intr_status & IntrTxDataLow) {
1697         if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1698             writel(++np->tx_threshold, np->base + TxThreshold);
1699             printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1700                    dev->name, np->tx_threshold * 16);
1701         } else
1702             printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1703     }
1704     if (intr_status & IntrRxGFPDead) {
1705         dev->stats.rx_fifo_errors++;
1706         dev->stats.rx_errors++;
1707     }
1708     if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1709         dev->stats.tx_fifo_errors++;
1710         dev->stats.tx_errors++;
1711     }
1712     if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1713         printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1714                dev->name, intr_status);
1715 }
1716 
1717 
1718 static struct net_device_stats *get_stats(struct net_device *dev)
1719 {
1720     struct netdev_private *np = netdev_priv(dev);
1721     void __iomem *ioaddr = np->base;
1722 
1723     /* This adapter architecture needs no SMP locks. */
1724     dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1725     dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1726     dev->stats.tx_packets = readl(ioaddr + 0x57000);
1727     dev->stats.tx_aborted_errors =
1728         readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1729     dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1730     dev->stats.collisions =
1731         readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1732 
1733     /* The chip only need report frame silently dropped. */
1734     dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1735     writew(0, ioaddr + RxDMAStatus);
1736     dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1737     dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1738     dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1739     dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1740 
1741     return &dev->stats;
1742 }
1743 
1744 #ifdef VLAN_SUPPORT
1745 static u32 set_vlan_mode(struct netdev_private *np)
1746 {
1747     u32 ret = VlanMode;
1748     u16 vid;
1749     void __iomem *filter_addr = np->base + HashTable + 8;
1750     int vlan_count = 0;
1751 
1752     for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1753         if (vlan_count == 32)
1754             break;
1755         writew(vid, filter_addr);
1756         filter_addr += 16;
1757         vlan_count++;
1758     }
1759     if (vlan_count == 32) {
1760         ret |= PerfectFilterVlan;
1761         while (vlan_count < 32) {
1762             writew(0, filter_addr);
1763             filter_addr += 16;
1764             vlan_count++;
1765         }
1766     }
1767     return ret;
1768 }
1769 #endif /* VLAN_SUPPORT */
1770 
1771 static void set_rx_mode(struct net_device *dev)
1772 {
1773     struct netdev_private *np = netdev_priv(dev);
1774     void __iomem *ioaddr = np->base;
1775     u32 rx_mode = MinVLANPrio;
1776     struct netdev_hw_addr *ha;
1777     int i;
1778 
1779 #ifdef VLAN_SUPPORT
1780     rx_mode |= set_vlan_mode(np);
1781 #endif /* VLAN_SUPPORT */
1782 
1783     if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1784         rx_mode |= AcceptAll;
1785     } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1786            (dev->flags & IFF_ALLMULTI)) {
1787         /* Too many to match, or accept all multicasts. */
1788         rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1789     } else if (netdev_mc_count(dev) <= 14) {
1790         /* Use the 16 element perfect filter, skip first two entries. */
1791         void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1792         const __be16 *eaddrs;
1793         netdev_for_each_mc_addr(ha, dev) {
1794             eaddrs = (__be16 *) ha->addr;
1795             writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1796             writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1797             writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1798         }
1799         eaddrs = (const __be16 *)dev->dev_addr;
1800         i = netdev_mc_count(dev) + 2;
1801         while (i++ < 16) {
1802             writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1803             writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1804             writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1805         }
1806         rx_mode |= AcceptBroadcast|PerfectFilter;
1807     } else {
1808         /* Must use a multicast hash table. */
1809         void __iomem *filter_addr;
1810         const __be16 *eaddrs;
1811         __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));   /* Multicast hash filter */
1812 
1813         memset(mc_filter, 0, sizeof(mc_filter));
1814         netdev_for_each_mc_addr(ha, dev) {
1815             /* The chip uses the upper 9 CRC bits
1816                as index into the hash table */
1817             int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1818             __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1819 
1820             *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1821         }
1822         /* Clear the perfect filter list, skip first two entries. */
1823         filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1824         eaddrs = (const __be16 *)dev->dev_addr;
1825         for (i = 2; i < 16; i++) {
1826             writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1827             writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1828             writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1829         }
1830         for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1831             writew(mc_filter[i], filter_addr);
1832         rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1833     }
1834     writel(rx_mode, ioaddr + RxFilterMode);
1835 }
1836 
1837 static int check_if_running(struct net_device *dev)
1838 {
1839     if (!netif_running(dev))
1840         return -EINVAL;
1841     return 0;
1842 }
1843 
1844 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1845 {
1846     struct netdev_private *np = netdev_priv(dev);
1847     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1848     strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1849 }
1850 
1851 static int get_link_ksettings(struct net_device *dev,
1852                   struct ethtool_link_ksettings *cmd)
1853 {
1854     struct netdev_private *np = netdev_priv(dev);
1855     spin_lock_irq(&np->lock);
1856     mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1857     spin_unlock_irq(&np->lock);
1858     return 0;
1859 }
1860 
1861 static int set_link_ksettings(struct net_device *dev,
1862                   const struct ethtool_link_ksettings *cmd)
1863 {
1864     struct netdev_private *np = netdev_priv(dev);
1865     int res;
1866     spin_lock_irq(&np->lock);
1867     res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1868     spin_unlock_irq(&np->lock);
1869     check_duplex(dev);
1870     return res;
1871 }
1872 
1873 static int nway_reset(struct net_device *dev)
1874 {
1875     struct netdev_private *np = netdev_priv(dev);
1876     return mii_nway_restart(&np->mii_if);
1877 }
1878 
1879 static u32 get_link(struct net_device *dev)
1880 {
1881     struct netdev_private *np = netdev_priv(dev);
1882     return mii_link_ok(&np->mii_if);
1883 }
1884 
1885 static u32 get_msglevel(struct net_device *dev)
1886 {
1887     return debug;
1888 }
1889 
1890 static void set_msglevel(struct net_device *dev, u32 val)
1891 {
1892     debug = val;
1893 }
1894 
1895 static const struct ethtool_ops ethtool_ops = {
1896     .begin = check_if_running,
1897     .get_drvinfo = get_drvinfo,
1898     .nway_reset = nway_reset,
1899     .get_link = get_link,
1900     .get_msglevel = get_msglevel,
1901     .set_msglevel = set_msglevel,
1902     .get_link_ksettings = get_link_ksettings,
1903     .set_link_ksettings = set_link_ksettings,
1904 };
1905 
1906 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1907 {
1908     struct netdev_private *np = netdev_priv(dev);
1909     struct mii_ioctl_data *data = if_mii(rq);
1910     int rc;
1911 
1912     if (!netif_running(dev))
1913         return -EINVAL;
1914 
1915     spin_lock_irq(&np->lock);
1916     rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1917     spin_unlock_irq(&np->lock);
1918 
1919     if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1920         check_duplex(dev);
1921 
1922     return rc;
1923 }
1924 
1925 static int netdev_close(struct net_device *dev)
1926 {
1927     struct netdev_private *np = netdev_priv(dev);
1928     void __iomem *ioaddr = np->base;
1929     int i;
1930 
1931     netif_stop_queue(dev);
1932 
1933     napi_disable(&np->napi);
1934 
1935     if (debug > 1) {
1936         printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1937                dev->name, (int) readl(ioaddr + IntrStatus));
1938         printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1939                dev->name, np->cur_tx, np->dirty_tx,
1940                np->cur_rx, np->dirty_rx);
1941     }
1942 
1943     /* Disable interrupts by clearing the interrupt mask. */
1944     writel(0, ioaddr + IntrEnable);
1945 
1946     /* Stop the chip's Tx and Rx processes. */
1947     writel(0, ioaddr + GenCtrl);
1948     readl(ioaddr + GenCtrl);
1949 
1950     if (debug > 5) {
1951         printk(KERN_DEBUG"  Tx ring at %#llx:\n",
1952                (long long) np->tx_ring_dma);
1953         for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1954             printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1955                    i, le32_to_cpu(np->tx_ring[i].status),
1956                    (long long) dma_to_cpu(np->tx_ring[i].addr),
1957                    le32_to_cpu(np->tx_done_q[i].status));
1958         printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
1959                (long long) np->rx_ring_dma, np->rx_done_q);
1960         if (np->rx_done_q)
1961             for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1962                 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1963                        i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1964         }
1965     }
1966 
1967     free_irq(np->pci_dev->irq, dev);
1968 
1969     /* Free all the skbuffs in the Rx queue. */
1970     for (i = 0; i < RX_RING_SIZE; i++) {
1971         np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1972         if (np->rx_info[i].skb != NULL) {
1973             dma_unmap_single(&np->pci_dev->dev,
1974                      np->rx_info[i].mapping,
1975                      np->rx_buf_sz, DMA_FROM_DEVICE);
1976             dev_kfree_skb(np->rx_info[i].skb);
1977         }
1978         np->rx_info[i].skb = NULL;
1979         np->rx_info[i].mapping = 0;
1980     }
1981     for (i = 0; i < TX_RING_SIZE; i++) {
1982         struct sk_buff *skb = np->tx_info[i].skb;
1983         if (skb == NULL)
1984             continue;
1985         dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
1986                  skb_first_frag_len(skb), DMA_TO_DEVICE);
1987         np->tx_info[i].mapping = 0;
1988         dev_kfree_skb(skb);
1989         np->tx_info[i].skb = NULL;
1990     }
1991 
1992     return 0;
1993 }
1994 
1995 static int __maybe_unused starfire_suspend(struct device *dev_d)
1996 {
1997     struct net_device *dev = dev_get_drvdata(dev_d);
1998 
1999     if (netif_running(dev)) {
2000         netif_device_detach(dev);
2001         netdev_close(dev);
2002     }
2003 
2004     return 0;
2005 }
2006 
2007 static int __maybe_unused starfire_resume(struct device *dev_d)
2008 {
2009     struct net_device *dev = dev_get_drvdata(dev_d);
2010 
2011     if (netif_running(dev)) {
2012         netdev_open(dev);
2013         netif_device_attach(dev);
2014     }
2015 
2016     return 0;
2017 }
2018 
2019 static void starfire_remove_one(struct pci_dev *pdev)
2020 {
2021     struct net_device *dev = pci_get_drvdata(pdev);
2022     struct netdev_private *np = netdev_priv(dev);
2023 
2024     BUG_ON(!dev);
2025 
2026     unregister_netdev(dev);
2027 
2028     if (np->queue_mem)
2029         dma_free_coherent(&pdev->dev, np->queue_mem_size,
2030                   np->queue_mem, np->queue_mem_dma);
2031 
2032 
2033     /* XXX: add wakeup code -- requires firmware for MagicPacket */
2034     pci_set_power_state(pdev, PCI_D3hot);   /* go to sleep in D3 mode */
2035     pci_disable_device(pdev);
2036 
2037     iounmap(np->base);
2038     pci_release_regions(pdev);
2039 
2040     free_netdev(dev);           /* Will also free np!! */
2041 }
2042 
2043 static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
2044 
2045 static struct pci_driver starfire_driver = {
2046     .name       = DRV_NAME,
2047     .probe      = starfire_init_one,
2048     .remove     = starfire_remove_one,
2049     .driver.pm  = &starfire_pm_ops,
2050     .id_table   = starfire_pci_tbl,
2051 };
2052 
2053 
2054 static int __init starfire_init (void)
2055 {
2056 /* when a module, this is printed whether or not devices are found in probe */
2057 #ifdef MODULE
2058     printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2059 #endif
2060 
2061     BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2062 
2063     return pci_register_driver(&starfire_driver);
2064 }
2065 
2066 
2067 static void __exit starfire_cleanup (void)
2068 {
2069     pci_unregister_driver (&starfire_driver);
2070 }
2071 
2072 
2073 module_init(starfire_init);
2074 module_exit(starfire_cleanup);