Back to home page

OSCL-LXR

 
 

    


0001 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
0002 /*
0003     Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
0004 
0005     Copyright 1994, 1995 Digital Equipment Corporation.     [de4x5.c]
0006     Written/copyright 1994-2001 by Donald Becker.           [tulip.c]
0007 
0008     This software may be used and distributed according to the terms of
0009     the GNU General Public License (GPL), incorporated herein by reference.
0010     Drivers based on or derived from this code fall under the GPL and must
0011     retain the authorship, copyright and license notice.  This file is not
0012     a complete program and may only be used when the entire operating
0013     system is licensed under the GPL.
0014 
0015     See the file COPYING in this distribution for more information.
0016 
0017     TODO, in rough priority order:
0018     * Support forcing media type with a module parameter,
0019       like dl2k.c/sundance.c
0020     * Constants (module parms?) for Rx work limit
0021     * Complete reset on PciErr
0022     * Jumbo frames / dev->change_mtu
0023     * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
0024     * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
0025     * Implement Tx software interrupt mitigation via
0026       Tx descriptor bit
0027 
0028  */
0029 
0030 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0031 
0032 #define DRV_NAME        "de2104x"
0033 #define DRV_RELDATE     "Mar 17, 2004"
0034 
0035 #include <linux/module.h>
0036 #include <linux/kernel.h>
0037 #include <linux/netdevice.h>
0038 #include <linux/etherdevice.h>
0039 #include <linux/init.h>
0040 #include <linux/interrupt.h>
0041 #include <linux/pci.h>
0042 #include <linux/delay.h>
0043 #include <linux/ethtool.h>
0044 #include <linux/compiler.h>
0045 #include <linux/rtnetlink.h>
0046 #include <linux/crc32.h>
0047 #include <linux/slab.h>
0048 
0049 #include <asm/io.h>
0050 #include <asm/irq.h>
0051 #include <linux/uaccess.h>
0052 #include <asm/unaligned.h>
0053 
0054 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
0055 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
0056 MODULE_LICENSE("GPL");
0057 
0058 static int debug = -1;
0059 module_param (debug, int, 0);
0060 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
0061 
0062 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
0063 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
0064         defined(CONFIG_SPARC) || defined(__ia64__) ||          \
0065         defined(__sh__) || defined(__mips__)
0066 static int rx_copybreak = 1518;
0067 #else
0068 static int rx_copybreak = 100;
0069 #endif
0070 module_param (rx_copybreak, int, 0);
0071 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
0072 
0073 #define DE_DEF_MSG_ENABLE   (NETIF_MSG_DRV      | \
0074                  NETIF_MSG_PROBE    | \
0075                  NETIF_MSG_LINK     | \
0076                  NETIF_MSG_IFDOWN   | \
0077                  NETIF_MSG_IFUP     | \
0078                  NETIF_MSG_RX_ERR   | \
0079                  NETIF_MSG_TX_ERR)
0080 
0081 /* Descriptor skip length in 32 bit longwords. */
0082 #ifndef CONFIG_DE2104X_DSL
0083 #define DSL         0
0084 #else
0085 #define DSL         CONFIG_DE2104X_DSL
0086 #endif
0087 
0088 #define DE_RX_RING_SIZE     128
0089 #define DE_TX_RING_SIZE     64
0090 #define DE_RING_BYTES       \
0091         ((sizeof(struct de_desc) * DE_RX_RING_SIZE) +   \
0092         (sizeof(struct de_desc) * DE_TX_RING_SIZE))
0093 #define NEXT_TX(N)      (((N) + 1) & (DE_TX_RING_SIZE - 1))
0094 #define NEXT_RX(N)      (((N) + 1) & (DE_RX_RING_SIZE - 1))
0095 #define TX_BUFFS_AVAIL(CP)                  \
0096     (((CP)->tx_tail <= (CP)->tx_head) ?         \
0097       (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :   \
0098       (CP)->tx_tail - (CP)->tx_head - 1)
0099 
0100 #define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/
0101 #define RX_OFFSET       2
0102 
0103 #define DE_SETUP_SKB        ((struct sk_buff *) 1)
0104 #define DE_DUMMY_SKB        ((struct sk_buff *) 2)
0105 #define DE_SETUP_FRAME_WORDS    96
0106 #define DE_EEPROM_WORDS     256
0107 #define DE_EEPROM_SIZE      (DE_EEPROM_WORDS * sizeof(u16))
0108 #define DE_MAX_MEDIA        5
0109 
0110 #define DE_MEDIA_TP_AUTO    0
0111 #define DE_MEDIA_BNC        1
0112 #define DE_MEDIA_AUI        2
0113 #define DE_MEDIA_TP     3
0114 #define DE_MEDIA_TP_FD      4
0115 #define DE_MEDIA_INVALID    DE_MAX_MEDIA
0116 #define DE_MEDIA_FIRST      0
0117 #define DE_MEDIA_LAST       (DE_MAX_MEDIA - 1)
0118 #define DE_AUI_BNC      (SUPPORTED_AUI | SUPPORTED_BNC)
0119 
0120 #define DE_TIMER_LINK       (60 * HZ)
0121 #define DE_TIMER_NO_LINK    (5 * HZ)
0122 
0123 #define DE_NUM_REGS     16
0124 #define DE_REGS_SIZE        (DE_NUM_REGS * sizeof(u32))
0125 #define DE_REGS_VER     1
0126 
0127 /* Time in jiffies before concluding the transmitter is hung. */
0128 #define TX_TIMEOUT      (6*HZ)
0129 
0130 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
0131    to support a pre-NWay full-duplex signaling mechanism using short frames.
0132    No one knows what it should be, but if left at its default value some
0133    10base2(!) packets trigger a full-duplex-request interrupt. */
0134 #define FULL_DUPLEX_MAGIC   0x6969
0135 
0136 enum {
0137     /* NIC registers */
0138     BusMode         = 0x00,
0139     TxPoll          = 0x08,
0140     RxPoll          = 0x10,
0141     RxRingAddr      = 0x18,
0142     TxRingAddr      = 0x20,
0143     MacStatus       = 0x28,
0144     MacMode         = 0x30,
0145     IntrMask        = 0x38,
0146     RxMissed        = 0x40,
0147     ROMCmd          = 0x48,
0148     CSR11           = 0x58,
0149     SIAStatus       = 0x60,
0150     CSR13           = 0x68,
0151     CSR14           = 0x70,
0152     CSR15           = 0x78,
0153     PCIPM           = 0x40,
0154 
0155     /* BusMode bits */
0156     CmdReset        = (1 << 0),
0157     CacheAlign16        = 0x00008000,
0158     BurstLen4       = 0x00000400,
0159     DescSkipLen     = (DSL << 2),
0160 
0161     /* Rx/TxPoll bits */
0162     NormalTxPoll        = (1 << 0),
0163     NormalRxPoll        = (1 << 0),
0164 
0165     /* Tx/Rx descriptor status bits */
0166     DescOwn         = (1 << 31),
0167     RxError         = (1 << 15),
0168     RxErrLong       = (1 << 7),
0169     RxErrCRC        = (1 << 1),
0170     RxErrFIFO       = (1 << 0),
0171     RxErrRunt       = (1 << 11),
0172     RxErrFrame      = (1 << 14),
0173     RingEnd         = (1 << 25),
0174     FirstFrag       = (1 << 29),
0175     LastFrag        = (1 << 30),
0176     TxError         = (1 << 15),
0177     TxFIFOUnder     = (1 << 1),
0178     TxLinkFail      = (1 << 2) | (1 << 10) | (1 << 11),
0179     TxMaxCol        = (1 << 8),
0180     TxOWC           = (1 << 9),
0181     TxJabber        = (1 << 14),
0182     SetupFrame      = (1 << 27),
0183     TxSwInt         = (1 << 31),
0184 
0185     /* MacStatus bits */
0186     IntrOK          = (1 << 16),
0187     IntrErr         = (1 << 15),
0188     RxIntr          = (1 << 6),
0189     RxEmpty         = (1 << 7),
0190     TxIntr          = (1 << 0),
0191     TxEmpty         = (1 << 2),
0192     PciErr          = (1 << 13),
0193     TxState         = (1 << 22) | (1 << 21) | (1 << 20),
0194     RxState         = (1 << 19) | (1 << 18) | (1 << 17),
0195     LinkFail        = (1 << 12),
0196     LinkPass        = (1 << 4),
0197     RxStopped       = (1 << 8),
0198     TxStopped       = (1 << 1),
0199 
0200     /* MacMode bits */
0201     TxEnable        = (1 << 13),
0202     RxEnable        = (1 << 1),
0203     RxTx            = TxEnable | RxEnable,
0204     FullDuplex      = (1 << 9),
0205     AcceptAllMulticast  = (1 << 7),
0206     AcceptAllPhys       = (1 << 6),
0207     BOCnt           = (1 << 5),
0208     MacModeClear        = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
0209                   RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
0210 
0211     /* ROMCmd bits */
0212     EE_SHIFT_CLK        = 0x02, /* EEPROM shift clock. */
0213     EE_CS           = 0x01, /* EEPROM chip select. */
0214     EE_DATA_WRITE       = 0x04, /* Data from the Tulip to EEPROM. */
0215     EE_WRITE_0      = 0x01,
0216     EE_WRITE_1      = 0x05,
0217     EE_DATA_READ        = 0x08, /* Data from the EEPROM chip. */
0218     EE_ENB          = (0x4800 | EE_CS),
0219 
0220     /* The EEPROM commands include the always-set leading bit. */
0221     EE_READ_CMD     = 6,
0222 
0223     /* RxMissed bits */
0224     RxMissedOver        = (1 << 16),
0225     RxMissedMask        = 0xffff,
0226 
0227     /* SROM-related bits */
0228     SROMC0InfoLeaf      = 27,
0229     MediaBlockMask      = 0x3f,
0230     MediaCustomCSRs     = (1 << 6),
0231 
0232     /* PCIPM bits */
0233     PM_Sleep        = (1 << 31),
0234     PM_Snooze       = (1 << 30),
0235     PM_Mask         = PM_Sleep | PM_Snooze,
0236 
0237     /* SIAStatus bits */
0238     NWayState       = (1 << 14) | (1 << 13) | (1 << 12),
0239     NWayRestart     = (1 << 12),
0240     NonselPortActive    = (1 << 9),
0241     SelPortActive       = (1 << 8),
0242     LinkFailStatus      = (1 << 2),
0243     NetCxnErr       = (1 << 1),
0244 };
0245 
0246 static const u32 de_intr_mask =
0247     IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
0248     LinkPass | LinkFail | PciErr;
0249 
0250 /*
0251  * Set the programmable burst length to 4 longwords for all:
0252  * DMA errors result without these values. Cache align 16 long.
0253  */
0254 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
0255 
/* SROM media block, as laid out in the board's EEPROM (packed wire
 * format -- do not let the compiler pad it).
 */
struct de_srom_media_block {
	u8			opts;
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;
0262 
/* SROM info-leaf header (packed wire format): default media selection
 * followed by the number of media blocks that follow it.
 */
struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;
	u8			unused;
} __packed;
0268 
/* Hardware DMA descriptor, shared with the chip (little-endian).
 * opts1: ownership/status word (DescOwn plus Rx/Tx status bits)
 * opts2: control bits (RingEnd, SetupFrame, ...) and buffer length
 * addr1: buffer bus address (addr2 is unused in the code visible here)
 * skip:  optional skip words when CONFIG_DE2104X_DSL is set
 */
struct de_desc {
	__le32			opts1;
	__le32			opts2;
	__le32			addr1;
	__le32			addr2;
#if DSL
	__le32			skip[DSL];
#endif
};
0278 
/* SIA register settings (CSR13-15) for one media type. */
struct media_info {
	u16			type;	/* DE_MEDIA_xxx */
	u16			csr13;
	u16			csr14;
	u16			csr15;
};
0285 
/* Per-slot ring bookkeeping: the skb attached to a descriptor and its
 * DMA mapping.  Tx slots may instead hold the DE_SETUP_SKB or
 * DE_DUMMY_SKB marker values (see de_tx / __de_set_rx_mode).
 */
struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};
0290 
/* Per-adapter driver state. */
struct de_private {
	unsigned		tx_head;	/* next Tx slot to fill */
	unsigned		tx_tail;	/* next Tx slot to reclaim */
	unsigned		rx_tail;	/* next Rx slot to poll */

	void			__iomem *regs;	/* mapped CSR window (dr32/dw32) */
	struct net_device	*dev;
	spinlock_t		lock;		/* guards Tx ring state and media handling */

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;	/* Rx buffer length programmed into opts2 */
	dma_addr_t		ring_dma;	/* bus address of the ring block */

	u32			msg_enable;	/* netif_msg_* bitmap */

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS]; /* address filter frame (DMAed to chip) */

	u32			media_type;	/* current DE_MEDIA_xxx */
	u32			media_supported;
	u32			media_advertise;
	struct media_info	media[DE_MAX_MEDIA];
	struct timer_list	media_timer;	/* periodic link check (de2104x_media_timer) */

	u8			*ee_data;	/* EEPROM contents (presumably cached at probe -- reader not in view) */
	unsigned		board_idx;
	unsigned		de21040 : 1;	/* chip is a 21040 (vs. 21041) */
	unsigned		media_lock : 1;	/* NOTE(review): looks like "media forced, don't autoselect" -- confirm */
};
0324 
0325 
0326 static void de_set_rx_mode (struct net_device *dev);
0327 static void de_tx (struct de_private *de);
0328 static void de_clean_rings (struct de_private *de);
0329 static void de_media_interrupt (struct de_private *de, u32 status);
0330 static void de21040_media_timer (struct timer_list *t);
0331 static void de21041_media_timer (struct timer_list *t);
0332 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
0333 
0334 
0335 static const struct pci_device_id de_pci_tbl[] = {
0336     { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
0337       PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
0338     { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
0339       PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
0340     { },
0341 };
0342 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
0343 
0344 static const char * const media_name[DE_MAX_MEDIA] = {
0345     "10baseT auto",
0346     "BNC",
0347     "AUI",
0348     "10baseT-HD",
0349     "10baseT-FD"
0350 };
0351 
0352 /* 21040 transceiver register settings:
0353  * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
0354 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
0355 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
0356 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
0357 
0358 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
0359 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
0360 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
0361 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
0362 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
0363 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
0364 
0365 
0366 #define dr32(reg)   ioread32(de->regs + (reg))
0367 #define dw32(reg, val)  iowrite32((val), de->regs + (reg))
0368 
0369 
/* Account a bad Rx descriptor: distinguish frames that spanned
 * multiple buffers (oversized) from per-frame hardware error bits,
 * and bump the matching netdev error counters.
 */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(de, rx_err, de->dev,
		  "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			netif_warn(de, rx_err, de->dev,
				   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
				   status);
			de->dev->stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->dev->stats.rx_errors++; /* end of a packet.*/
		/* 0x0890 == RxErrRunt | RxErrLong | (1 << 4) length-type bits */
		if (status & 0x0890) de->dev->stats.rx_length_errors++;
		if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
	}
}
0393 
/* Rx completion: walk the ring from rx_tail, hand finished frames to
 * the stack, and re-arm each descriptor for the chip.  Work is bounded
 * to DE_RX_RING_SIZE - 1 slots per call.  Small frames (<= rx_copybreak)
 * are copied into a fresh skb so the mapped ring buffer can be reused.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();		/* read opts1 only after the ownership check ordering point */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;		/* chip still owns this slot */

		/* the length is actually a 15 bit value here according
		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
		 */
		len = ((status >> 16) & 0x7fff) - 4;	/* strip the 4-byte FCS */
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			/* in drop mode after an earlier failure: discard the
			 * frame but still recycle the descriptor below
			 */
			de->dev->stats.rx_dropped++;
			goto rx_next;
		}

		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			/* out of memory: switch to drop mode and shorten the
			 * remaining work budget
			 */
			de->dev->stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* big frame: pass the ring buffer up the stack and
			 * install the freshly allocated skb in its slot
			 */
			dma_unmap_single(&de->pdev->dev, mapping, buflen,
					 DMA_FROM_DEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				dma_map_single(&de->pdev->dev, copy_skb->data,
					       buflen, DMA_FROM_DEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* small frame: copy out of the still-mapped ring
			 * buffer, then give the buffer back to the device
			 */
			dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
						DMA_FROM_DEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			dma_sync_single_for_device(&de->pdev->dev, mapping,
						   len, DMA_FROM_DEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->dev->stats.rx_packets++;
		de->dev->stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* re-arm the descriptor, preserving RingEnd on the last slot;
		 * DescOwn is written only after opts2/addr1 (wmb)
		 */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
0493 
/* IRQ handler: ack the raised status bits, then dispatch Rx, Tx, link
 * and PCI-error processing as flagged.  Rx runs without de->lock; Tx
 * reclaim and media handling take it.
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* not our interrupt, or (presumably) a dead/removed device --
	 * NOTE(review): only the low 16 bits are compared to all-ones here
	 */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	dw32(MacStatus, status);	/* write-back acks the bits we saw */

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);	/* kick Rx after descriptor starvation */
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* read-then-write clears the latched PCI status error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}
0538 
/* Reclaim completed Tx descriptors (tx_tail .. tx_head): unmap buffers,
 * account per-frame status bits, free skbs, and wake the queue when a
 * quarter of the ring is free again.  Called under de->lock from the
 * interrupt handler.
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;		/* chip still owns this descriptor */

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;	/* errata placeholder slot, nothing mapped */

		if (unlikely(skb == DE_SETUP_SKB)) {
			/* setup frames map de->setup_frame, not a real skb */
			dma_unmap_single(&de->pdev->dev,
					 de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame),
					 DMA_TO_DEVICE);
			goto next;
		}

		dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
				 skb->len, DMA_TO_DEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->dev->stats.tx_errors++;
				if (status & TxOWC)
					de->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->dev->stats.tx_fifo_errors++;
			} else {
				de->dev->stats.tx_packets++;
				de->dev->stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_consume_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once >1/4 of the ring is available */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
0603 
0604 static netdev_tx_t de_start_xmit (struct sk_buff *skb,
0605                     struct net_device *dev)
0606 {
0607     struct de_private *de = netdev_priv(dev);
0608     unsigned int entry, tx_free;
0609     u32 mapping, len, flags = FirstFrag | LastFrag;
0610     struct de_desc *txd;
0611 
0612     spin_lock_irq(&de->lock);
0613 
0614     tx_free = TX_BUFFS_AVAIL(de);
0615     if (tx_free == 0) {
0616         netif_stop_queue(dev);
0617         spin_unlock_irq(&de->lock);
0618         return NETDEV_TX_BUSY;
0619     }
0620     tx_free--;
0621 
0622     entry = de->tx_head;
0623 
0624     txd = &de->tx_ring[entry];
0625 
0626     len = skb->len;
0627     mapping = dma_map_single(&de->pdev->dev, skb->data, len,
0628                  DMA_TO_DEVICE);
0629     if (entry == (DE_TX_RING_SIZE - 1))
0630         flags |= RingEnd;
0631     if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
0632         flags |= TxSwInt;
0633     flags |= len;
0634     txd->opts2 = cpu_to_le32(flags);
0635     txd->addr1 = cpu_to_le32(mapping);
0636 
0637     de->tx_skb[entry].skb = skb;
0638     de->tx_skb[entry].mapping = mapping;
0639     wmb();
0640 
0641     txd->opts1 = cpu_to_le32(DescOwn);
0642     wmb();
0643 
0644     de->tx_head = NEXT_TX(entry);
0645     netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
0646           entry, skb->len);
0647 
0648     if (tx_free == 0)
0649         netif_stop_queue(dev);
0650 
0651     spin_unlock_irq(&de->lock);
0652 
0653     /* Trigger an immediate transmit demand. */
0654     dw32(TxPoll, NormalTxPoll);
0655 
0656     return NETDEV_TX_OK;
0657 }
0658 
0659 /* Set or clear the multicast filter for this adaptor.
0660    Note that we only use exclusion around actually queueing the
0661    new frame, not around filling de->setup_frame.  This is non-deterministic
0662    when re-entered but still correct. */
0663 
/* Build a hash-filter setup frame: a 512-bit multicast hash table (32
 * words, each written twice -- only the low shortword of each pair is
 * valid), with our unicast address in the final perfect-filter slot.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	const u16 *eaddrs;
	int i;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		/* 9-bit CRC hash selects one of 512 filter bits */
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (const u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
0693 
/* Build a perfect-filter setup frame: each multicast address written as
 * three doubled shortwords, unused slots filled with broadcast, and our
 * unicast address last.  Caller guarantees netdev_mc_count(dev) <= 14
 * (see __de_set_rx_mode).
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	const u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (const u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
0718 
0719 
/* Program the Rx filter; caller holds de->lock (see de_set_rx_mode).
 * Promiscuous/allmulti are handled purely via MacMode bits; otherwise
 * a setup frame is queued on the Tx ring to load the chip's perfect or
 * hash filter.
 */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
		dma_map_single(&de->pdev->dev, de->setup_frame,
			       sizeof(de->setup_frame), DMA_TO_DEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();

	/* hand the setup descriptor to the chip first... */
	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* ...then the dummy prefix descriptor, so the chip cannot reach
	 * the setup frame before it is fully written (errata workaround)
	 */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
0803 
0804 static void de_set_rx_mode (struct net_device *dev)
0805 {
0806     unsigned long flags;
0807     struct de_private *de = netdev_priv(dev);
0808 
0809     spin_lock_irqsave (&de->lock, flags);
0810     __de_set_rx_mode(dev);
0811     spin_unlock_irqrestore (&de->lock, flags);
0812 }
0813 
0814 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
0815 {
0816     if (unlikely(rx_missed & RxMissedOver))
0817         de->dev->stats.rx_missed_errors += RxMissedMask;
0818     else
0819         de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
0820 }
0821 
0822 static void __de_get_stats(struct de_private *de)
0823 {
0824     u32 tmp = dr32(RxMissed); /* self-clearing */
0825 
0826     de_rx_missed(de, tmp);
0827 }
0828 
0829 static struct net_device_stats *de_get_stats(struct net_device *dev)
0830 {
0831     struct de_private *de = netdev_priv(dev);
0832 
0833     /* The chip only need report frame silently dropped. */
0834     spin_lock_irq(&de->lock);
0835     if (netif_running(dev) && netif_device_present(dev))
0836         __de_get_stats(de);
0837     spin_unlock_irq(&de->lock);
0838 
0839     return &dev->stats;
0840 }
0841 
0842 static inline int de_is_running (struct de_private *de)
0843 {
0844     return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
0845 }
0846 
0847 static void de_stop_rxtx (struct de_private *de)
0848 {
0849     u32 macmode;
0850     unsigned int i = 1300/100;
0851 
0852     macmode = dr32(MacMode);
0853     if (macmode & RxTx) {
0854         dw32(MacMode, macmode & ~RxTx);
0855         dr32(MacMode);
0856     }
0857 
0858     /* wait until in-flight frame completes.
0859      * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
0860      * Typically expect this loop to end in < 50 us on 100BT.
0861      */
0862     while (--i) {
0863         if (!de_is_running(de))
0864             return;
0865         udelay(100);
0866     }
0867 
0868     netdev_warn(de->dev, "timeout expired, stopping DMA\n");
0869 }
0870 
0871 static inline void de_start_rxtx (struct de_private *de)
0872 {
0873     u32 macmode;
0874 
0875     macmode = dr32(MacMode);
0876     if ((macmode & RxTx) != RxTx) {
0877         dw32(MacMode, macmode | RxTx);
0878         dr32(MacMode);
0879     }
0880 }
0881 
/* Quiesce the chip: mask all interrupts, stop the DMA engines, ack any
 * latched status, and reset the software ring indices.
 */
static void de_stop_hw (struct de_private *de)
{

	udelay(5);
	dw32(IntrMask, 0);	/* mask every interrupt source */

	de_stop_rxtx(de);

	dw32(MacStatus, dr32(MacStatus));	/* ack anything still pending */

	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}
0897 
0898 static void de_link_up(struct de_private *de)
0899 {
0900     if (!netif_carrier_ok(de->dev)) {
0901         netif_carrier_on(de->dev);
0902         netif_info(de, link, de->dev, "link up, media %s\n",
0903                media_name[de->media_type]);
0904     }
0905 }
0906 
0907 static void de_link_down(struct de_private *de)
0908 {
0909     if (netif_carrier_ok(de->dev)) {
0910         netif_carrier_off(de->dev);
0911         netif_info(de, link, de->dev, "link down\n");
0912     }
0913 }
0914 
/* Program the SIA (CSR13-15) for de->media_type.  The PHY is reset via
 * CSR13=0 before the new values are loaded; the register write order
 * and the 10ms settle delay must be preserved.
 */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);	/* 21040-only; see FULL_DUPLEX_MAGIC note */
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
0950 
0951 static void de_next_media (struct de_private *de, const u32 *media,
0952                unsigned int n_media)
0953 {
0954     unsigned int i;
0955 
0956     for (i = 0; i < n_media; i++) {
0957         if (de_ok_to_advertise(de, media[i])) {
0958             de->media_type = media[i];
0959             return;
0960         }
0961     }
0962 }
0963 
/* Periodic link monitor for the 21040.  While carrier is present the
 * timer is re-armed at the link-check interval; on loss of carrier it
 * toggles between TP and AUI (unless the user locked the media type),
 * reprograms the SIA, restarts DMA and re-arms at the shorter
 * no-link interval.
 */
static void de21040_media_timer (struct timer_list *t)
{
    struct de_private *de = from_timer(de, t, media_timer);
    struct net_device *dev = de->dev;
    u32 status = dr32(SIAStatus);
    unsigned int carrier;
    unsigned long flags;

    carrier = (status & NetCxnErr) ? 0 : 1;

    if (carrier) {
        /* for TP media, also require a good link status bit */
        if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
            goto no_link_yet;

        de->media_timer.expires = jiffies + DE_TIMER_LINK;
        add_timer(&de->media_timer);
        if (!netif_carrier_ok(dev))
            de_link_up(de);
        else
            netif_info(de, timer, dev, "%s link ok, status %x\n",
                   media_name[de->media_type], status);
        return;
    }

    de_link_down(de);

    /* media type forced by the user: keep probing it, never switch */
    if (de->media_lock)
        return;

    /* alternate between the two media the 21040 supports here */
    if (de->media_type == DE_MEDIA_AUI) {
        static const u32 next_state = DE_MEDIA_TP;
        de_next_media(de, &next_state, 1);
    } else {
        static const u32 next_state = DE_MEDIA_AUI;
        de_next_media(de, &next_state, 1);
    }

    spin_lock_irqsave(&de->lock, flags);
    de_stop_rxtx(de);
    spin_unlock_irqrestore(&de->lock, flags);
    de_set_media(de);
    de_start_rxtx(de);

no_link_yet:
    de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
    add_timer(&de->media_timer);

    netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
           media_name[de->media_type], status);
}
1014 
1015 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1016 {
1017     switch (new_media) {
1018     case DE_MEDIA_TP_AUTO:
1019         if (!(de->media_advertise & ADVERTISED_Autoneg))
1020             return 0;
1021         if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1022             return 0;
1023         break;
1024     case DE_MEDIA_BNC:
1025         if (!(de->media_advertise & ADVERTISED_BNC))
1026             return 0;
1027         break;
1028     case DE_MEDIA_AUI:
1029         if (!(de->media_advertise & ADVERTISED_AUI))
1030             return 0;
1031         break;
1032     case DE_MEDIA_TP:
1033         if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1034             return 0;
1035         break;
1036     case DE_MEDIA_TP_FD:
1037         if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1038             return 0;
1039         break;
1040     }
1041 
1042     return 1;
1043 }
1044 
/* Periodic link monitor for the 21041.  While carrier is present the
 * timer is re-armed at the link-check interval.  On loss of carrier it
 * picks a new media type - first from the chip's port-activity hint,
 * otherwise by cycling through the advertised media - reprograms the
 * SIA, restarts DMA, and re-arms at the shorter no-link interval.
 */
static void de21041_media_timer (struct timer_list *t)
{
    struct de_private *de = from_timer(de, t, media_timer);
    struct net_device *dev = de->dev;
    u32 status = dr32(SIAStatus);
    unsigned int carrier;
    unsigned long flags;

    /* clear port active bits */
    dw32(SIAStatus, NonselPortActive | SelPortActive);

    carrier = (status & NetCxnErr) ? 0 : 1;

    if (carrier) {
        /* for any TP media, also require a good link status bit */
        if ((de->media_type == DE_MEDIA_TP_AUTO ||
             de->media_type == DE_MEDIA_TP ||
             de->media_type == DE_MEDIA_TP_FD) &&
            (status & LinkFailStatus))
            goto no_link_yet;

        de->media_timer.expires = jiffies + DE_TIMER_LINK;
        add_timer(&de->media_timer);
        if (!netif_carrier_ok(dev))
            de_link_up(de);
        else
            netif_info(de, timer, dev,
                   "%s link ok, mode %x status %x\n",
                   media_name[de->media_type],
                   dr32(MacMode), status);
        return;
    }

    de_link_down(de);

    /* if media type locked, don't switch media */
    if (de->media_lock)
        goto set_media;

    /* if activity detected, use that as hint for new media type */
    if (status & NonselPortActive) {
        unsigned int have_media = 1;

        /* if AUI/BNC selected, then activity is on TP port */
        if (de->media_type == DE_MEDIA_AUI ||
            de->media_type == DE_MEDIA_BNC) {
            if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
                de->media_type = DE_MEDIA_TP_AUTO;
            else
                have_media = 0;
        }

        /* TP selected.  If there is only TP and BNC, then it's BNC */
        else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
             de_ok_to_advertise(de, DE_MEDIA_BNC))
            de->media_type = DE_MEDIA_BNC;

        /* TP selected.  If there is only TP and AUI, then it's AUI */
        else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
             de_ok_to_advertise(de, DE_MEDIA_AUI))
            de->media_type = DE_MEDIA_AUI;

        /* otherwise, ignore the hint */
        else
            have_media = 0;

        if (have_media)
            goto set_media;
    }

    /*
     * Absent or ambiguous activity hint, move to next advertised
     * media state.  If de->media_type is left unchanged, this
     * simply resets the PHY and reloads the current media settings.
     */
    if (de->media_type == DE_MEDIA_AUI) {
        static const u32 next_states[] = {
            DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    } else if (de->media_type == DE_MEDIA_BNC) {
        static const u32 next_states[] = {
            DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    } else {
        static const u32 next_states[] = {
            DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    }

set_media:
    spin_lock_irqsave(&de->lock, flags);
    de_stop_rxtx(de);
    spin_unlock_irqrestore(&de->lock, flags);
    de_set_media(de);
    de_start_rxtx(de);

no_link_yet:
    de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
    add_timer(&de->media_timer);

    netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
           media_name[de->media_type], status);
}
1150 
/* React to LinkPass/LinkFail bits in the interrupt status word.
 * LinkPass: switch AUI/BNC media back to TP autoneg when that is
 * permitted, then report carrier up and re-arm the media timer.
 * LinkFail: report carrier down, but only if the active media is TP.
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
    if (status & LinkPass) {
        /* Ignore if current media is AUI or BNC and we can't use TP */
        if ((de->media_type == DE_MEDIA_AUI ||
             de->media_type == DE_MEDIA_BNC) &&
            (de->media_lock ||
             !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
            return;
        /* If current media is not TP, change it to TP */
        if ((de->media_type == DE_MEDIA_AUI ||
             de->media_type == DE_MEDIA_BNC)) {
            de->media_type = DE_MEDIA_TP_AUTO;
            de_stop_rxtx(de);
            de_set_media(de);
            de_start_rxtx(de);
        }
        de_link_up(de);
        mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
        return;
    }

    /* caller only invokes us for LinkPass or LinkFail events */
    BUG_ON(!(status & LinkFail));
    /* Mark the link as down only if current media is TP */
    if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
        de->media_type != DE_MEDIA_BNC) {
        de_link_down(de);
        mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
    }
}
1181 
/* Soft-reset the MAC through the BusMode register (CSR0) and restore
 * the cached bus-mode value.
 * Returns 0 on success, -EBUSY if the chip reads all-ones before the
 * reset or is still in an Rx/Tx state afterwards, and -ENODEV if it
 * reads all-ones afterwards (device gone).
 */
static int de_reset_mac (struct de_private *de)
{
    u32 status, tmp;

    /*
     * Reset MAC.  de4x5.c and tulip.c examined for "advice"
     * in this area.
     */

    /* all-ones usually means the device has dropped off the bus */
    if (dr32(BusMode) == 0xffffffff)
        return -EBUSY;

    /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
    dw32 (BusMode, CmdReset);
    mdelay (1);

    dw32 (BusMode, de_bus_mode);
    mdelay (1);

    /* several dummy reads with delays to let the chip settle */
    for (tmp = 0; tmp < 5; tmp++) {
        dr32 (BusMode);
        mdelay (1);
    }

    mdelay (1);

    status = dr32(MacStatus);
    if (status & (RxState | TxState))
        return -EBUSY;
    if (status == 0xffffffff)
        return -ENODEV;
    return 0;
}
1215 
1216 static void de_adapter_wake (struct de_private *de)
1217 {
1218     u32 pmctl;
1219 
1220     if (de->de21040)
1221         return;
1222 
1223     pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1224     if (pmctl & PM_Mask) {
1225         pmctl &= ~PM_Mask;
1226         pci_write_config_dword(de->pdev, PCIPM, pmctl);
1227 
1228         /* de4x5.c delays, so we do too */
1229         msleep(10);
1230     }
1231 }
1232 
1233 static void de_adapter_sleep (struct de_private *de)
1234 {
1235     u32 pmctl;
1236 
1237     if (de->de21040)
1238         return;
1239 
1240     dw32(CSR13, 0); /* Reset phy */
1241     pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1242     pmctl |= PM_Sleep;
1243     pci_write_config_dword(de->pdev, PCIPM, pmctl);
1244 }
1245 
/* Bring the chip from an unknown state to running: wake it from PM
 * sleep, reset the MAC, program the SIA for the current media, load
 * the descriptor ring addresses, enable Rx/Tx DMA, unmask interrupts
 * and set the Rx filter.  Returns 0 or the error from de_reset_mac().
 */
static int de_init_hw (struct de_private *de)
{
    struct net_device *dev = de->dev;
    u32 macmode;
    int rc;

    de_adapter_wake(de);

    /* preserve MacMode bits outside the clear mask across the reset */
    macmode = dr32(MacMode) & ~MacModeClear;

    rc = de_reset_mac(de);
    if (rc)
        return rc;

    de_set_media(de); /* reset phy */

    /* Tx ring is laid out immediately after the Rx ring in DMA memory */
    dw32(RxRingAddr, de->ring_dma);
    dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

    dw32(MacMode, RxTx | macmode);

    dr32(RxMissed); /* self-clearing */

    dw32(IntrMask, de_intr_mask);

    de_set_rx_mode(dev);

    return 0;
}
1275 
1276 static int de_refill_rx (struct de_private *de)
1277 {
1278     unsigned i;
1279 
1280     for (i = 0; i < DE_RX_RING_SIZE; i++) {
1281         struct sk_buff *skb;
1282 
1283         skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
1284         if (!skb)
1285             goto err_out;
1286 
1287         de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
1288                                skb->data,
1289                                de->rx_buf_sz,
1290                                DMA_FROM_DEVICE);
1291         de->rx_skb[i].skb = skb;
1292 
1293         de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1294         if (i == (DE_RX_RING_SIZE - 1))
1295             de->rx_ring[i].opts2 =
1296                 cpu_to_le32(RingEnd | de->rx_buf_sz);
1297         else
1298             de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1299         de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1300         de->rx_ring[i].addr2 = 0;
1301     }
1302 
1303     return 0;
1304 
1305 err_out:
1306     de_clean_rings(de);
1307     return -ENOMEM;
1308 }
1309 
1310 static int de_init_rings (struct de_private *de)
1311 {
1312     memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1313     de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1314 
1315     de->rx_tail = 0;
1316     de->tx_head = de->tx_tail = 0;
1317 
1318     return de_refill_rx (de);
1319 }
1320 
1321 static int de_alloc_rings (struct de_private *de)
1322 {
1323     de->rx_ring = dma_alloc_coherent(&de->pdev->dev, DE_RING_BYTES,
1324                      &de->ring_dma, GFP_KERNEL);
1325     if (!de->rx_ring)
1326         return -ENOMEM;
1327     de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1328     return de_init_rings(de);
1329 }
1330 
/* Return both descriptor rings to a pristine state and release every
 * buffer still attached to them.  Real Tx packets (anything other
 * than the internal dummy/setup sentinels) count as tx_dropped.
 */
static void de_clean_rings (struct de_private *de)
{
    unsigned i;

    memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
    de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
    wmb();
    memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
    de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
    wmb();

    for (i = 0; i < DE_RX_RING_SIZE; i++) {
        if (de->rx_skb[i].skb) {
            dma_unmap_single(&de->pdev->dev,
                     de->rx_skb[i].mapping, de->rx_buf_sz,
                     DMA_FROM_DEVICE);
            dev_kfree_skb(de->rx_skb[i].skb);
        }
    }

    for (i = 0; i < DE_TX_RING_SIZE; i++) {
        struct sk_buff *skb = de->tx_skb[i].skb;
        /* DE_DUMMY_SKB entries have nothing mapped or allocated */
        if ((skb) && (skb != DE_DUMMY_SKB)) {
            if (skb != DE_SETUP_SKB) {
                de->dev->stats.tx_dropped++;
                dma_unmap_single(&de->pdev->dev,
                         de->tx_skb[i].mapping,
                         skb->len, DMA_TO_DEVICE);
                dev_kfree_skb(skb);
            } else {
                /* setup frames map de->setup_frame, not an skb */
                dma_unmap_single(&de->pdev->dev,
                         de->tx_skb[i].mapping,
                         sizeof(de->setup_frame),
                         DMA_TO_DEVICE);
            }
        }
    }

    memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
    memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
1372 
1373 static void de_free_rings (struct de_private *de)
1374 {
1375     de_clean_rings(de);
1376     dma_free_coherent(&de->pdev->dev, DE_RING_BYTES, de->rx_ring,
1377               de->ring_dma);
1378     de->rx_ring = NULL;
1379     de->tx_ring = NULL;
1380 }
1381 
/* ndo_open: allocate rings, grab the (shared) IRQ, bring up the
 * hardware, then start the Tx queue and the media timer.  Unwinds in
 * reverse order on failure.
 */
static int de_open (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    const int irq = de->pdev->irq;
    int rc;

    netif_dbg(de, ifup, dev, "enabling interface\n");

    /* standard buffer for <=1500 MTU; otherwise MTU plus header slack */
    de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    rc = de_alloc_rings(de);
    if (rc) {
        netdev_err(dev, "ring allocation failure, err=%d\n", rc);
        return rc;
    }

    /* mask interrupts before the handler can be invoked */
    dw32(IntrMask, 0);

    rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
    if (rc) {
        netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
        goto err_out_free;
    }

    rc = de_init_hw(de);
    if (rc) {
        netdev_err(dev, "h/w init failure, err=%d\n", rc);
        goto err_out_free_irq;
    }

    netif_start_queue(dev);
    mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

    return 0;

err_out_free_irq:
    free_irq(irq, dev);
err_out_free:
    de_free_rings(de);
    return rc;
}
1423 
/* ndo_stop: kill the media timer, stop the hardware and queue under
 * the driver lock, release the IRQ and rings, and put the chip into
 * its PM sleep state.  Always returns 0.
 */
static int de_close (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    unsigned long flags;

    netif_dbg(de, ifdown, dev, "disabling interface\n");

    del_timer_sync(&de->media_timer);

    spin_lock_irqsave(&de->lock, flags);
    de_stop_hw(de);
    netif_stop_queue(dev);
    netif_carrier_off(dev);
    spin_unlock_irqrestore(&de->lock, flags);

    free_irq(de->pdev->irq, dev);

    de_free_rings(de);
    de_adapter_sleep(de);
    return 0;
}
1445 
/* ndo_tx_timeout: the Tx path appears wedged.  Stop the chip with the
 * IRQ disabled, salvage the statistics, rebuild both rings from
 * scratch, re-initialize the hardware and restart the queue.
 */
static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
    struct de_private *de = netdev_priv(dev);
    const int irq = de->pdev->irq;

    netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
           dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
           de->rx_tail, de->tx_head, de->tx_tail);

    del_timer_sync(&de->media_timer);

    /* keep the handler out while we stop the chip */
    disable_irq(irq);
    spin_lock_irq(&de->lock);

    de_stop_hw(de);
    netif_stop_queue(dev);
    netif_carrier_off(dev);

    spin_unlock_irq(&de->lock);
    enable_irq(irq);

    /* Update the error counts. */
    __de_get_stats(de);

    /* ensure no handler instance is still running before ring teardown */
    synchronize_irq(irq);
    de_clean_rings(de);

    de_init_rings(de);

    de_init_hw(de);

    netif_wake_queue(dev);
}
1479 
1480 static void __de_get_regs(struct de_private *de, u8 *buf)
1481 {
1482     int i;
1483     u32 *rbuf = (u32 *)buf;
1484 
1485     /* read all CSRs */
1486     for (i = 0; i < DE_NUM_REGS; i++)
1487         rbuf[i] = dr32(i * 8);
1488 
1489     /* handle self-clearing RxMissed counter, CSR8 */
1490     de_rx_missed(de, rbuf[8]);
1491 }
1492 
1493 static void __de_get_link_ksettings(struct de_private *de,
1494                     struct ethtool_link_ksettings *cmd)
1495 {
1496     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1497                         de->media_supported);
1498     cmd->base.phy_address = 0;
1499     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1500                         de->media_advertise);
1501 
1502     switch (de->media_type) {
1503     case DE_MEDIA_AUI:
1504         cmd->base.port = PORT_AUI;
1505         break;
1506     case DE_MEDIA_BNC:
1507         cmd->base.port = PORT_BNC;
1508         break;
1509     default:
1510         cmd->base.port = PORT_TP;
1511         break;
1512     }
1513 
1514     cmd->base.speed = 10;
1515 
1516     if (dr32(MacMode) & FullDuplex)
1517         cmd->base.duplex = DUPLEX_FULL;
1518     else
1519         cmd->base.duplex = DUPLEX_HALF;
1520 
1521     if (de->media_lock)
1522         cmd->base.autoneg = AUTONEG_DISABLE;
1523     else
1524         cmd->base.autoneg = AUTONEG_ENABLE;
1525 
1526     /* ignore maxtxpkt, maxrxpkt for now */
1527 }
1528 
/* Apply a new ethtool link configuration.  Validates the request
 * against this chip's abilities (10 Mbps only, supported ports and
 * advertise bits), maps port/duplex/autoneg onto a DE_MEDIA_* type,
 * and reprograms the SIA if anything actually changed.
 * Returns 0 on success or -EINVAL.  Callers hold de->lock.
 */
static int __de_set_link_ksettings(struct de_private *de,
                   const struct ethtool_link_ksettings *cmd)
{
    u32 new_media;
    unsigned int media_lock;
    u8 duplex = cmd->base.duplex;
    u8 port = cmd->base.port;
    u8 autoneg = cmd->base.autoneg;
    u32 advertising;

    ethtool_convert_link_mode_to_legacy_u32(&advertising,
                        cmd->link_modes.advertising);

    /* reject anything the hardware cannot do */
    if (cmd->base.speed != 10)
        return -EINVAL;
    if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
        return -EINVAL;
    if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
        return -EINVAL;
    if (de->de21040 && port == PORT_BNC)
        return -EINVAL;
    if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
        return -EINVAL;
    if (advertising & ~de->media_supported)
        return -EINVAL;
    if (autoneg == AUTONEG_ENABLE &&
        (!(advertising & ADVERTISED_Autoneg)))
        return -EINVAL;

    /* translate the ethtool port into this driver's media type */
    switch (port) {
    case PORT_AUI:
        new_media = DE_MEDIA_AUI;
        if (!(advertising & ADVERTISED_AUI))
            return -EINVAL;
        break;
    case PORT_BNC:
        new_media = DE_MEDIA_BNC;
        if (!(advertising & ADVERTISED_BNC))
            return -EINVAL;
        break;
    default:
        if (autoneg == AUTONEG_ENABLE)
            new_media = DE_MEDIA_TP_AUTO;
        else if (duplex == DUPLEX_FULL)
            new_media = DE_MEDIA_TP_FD;
        else
            new_media = DE_MEDIA_TP;
        if (!(advertising & ADVERTISED_TP))
            return -EINVAL;
        if (!(advertising & (ADVERTISED_10baseT_Full |
                     ADVERTISED_10baseT_Half)))
            return -EINVAL;
        break;
    }

    /* disabling autoneg locks the media type */
    media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;

    if ((new_media == de->media_type) &&
        (media_lock == de->media_lock) &&
        (advertising == de->media_advertise))
        return 0; /* nothing to change */

    de_link_down(de);
    mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
    de_stop_rxtx(de);

    de->media_type = new_media;
    de->media_lock = media_lock;
    de->media_advertise = advertising;
    de_set_media(de);
    if (netif_running(de->dev))
        de_start_rxtx(de);

    return 0;
}
1604 
1605 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1606 {
1607     struct de_private *de = netdev_priv(dev);
1608 
1609     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1610     strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1611 }
1612 
/* ethtool: size in bytes of the register dump filled by de_get_regs(). */
static int de_get_regs_len(struct net_device *dev)
{
    return DE_REGS_SIZE;
}
1617 
1618 static int de_get_link_ksettings(struct net_device *dev,
1619                  struct ethtool_link_ksettings *cmd)
1620 {
1621     struct de_private *de = netdev_priv(dev);
1622 
1623     spin_lock_irq(&de->lock);
1624     __de_get_link_ksettings(de, cmd);
1625     spin_unlock_irq(&de->lock);
1626 
1627     return 0;
1628 }
1629 
1630 static int de_set_link_ksettings(struct net_device *dev,
1631                  const struct ethtool_link_ksettings *cmd)
1632 {
1633     struct de_private *de = netdev_priv(dev);
1634     int rc;
1635 
1636     spin_lock_irq(&de->lock);
1637     rc = __de_set_link_ksettings(de, cmd);
1638     spin_unlock_irq(&de->lock);
1639 
1640     return rc;
1641 }
1642 
1643 static u32 de_get_msglevel(struct net_device *dev)
1644 {
1645     struct de_private *de = netdev_priv(dev);
1646 
1647     return de->msg_enable;
1648 }
1649 
1650 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1651 {
1652     struct de_private *de = netdev_priv(dev);
1653 
1654     de->msg_enable = msglvl;
1655 }
1656 
1657 static int de_get_eeprom(struct net_device *dev,
1658              struct ethtool_eeprom *eeprom, u8 *data)
1659 {
1660     struct de_private *de = netdev_priv(dev);
1661 
1662     if (!de->ee_data)
1663         return -EOPNOTSUPP;
1664     if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1665         (eeprom->len != DE_EEPROM_SIZE))
1666         return -EINVAL;
1667     memcpy(data, de->ee_data, eeprom->len);
1668 
1669     return 0;
1670 }
1671 
1672 static int de_nway_reset(struct net_device *dev)
1673 {
1674     struct de_private *de = netdev_priv(dev);
1675     u32 status;
1676 
1677     if (de->media_type != DE_MEDIA_TP_AUTO)
1678         return -EINVAL;
1679     if (netif_carrier_ok(de->dev))
1680         de_link_down(de);
1681 
1682     status = dr32(SIAStatus);
1683     dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1684     netif_info(de, link, dev, "link nway restart, status %x,%x\n",
1685            status, dr32(SIAStatus));
1686     return 0;
1687 }
1688 
1689 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1690             void *data)
1691 {
1692     struct de_private *de = netdev_priv(dev);
1693 
1694     regs->version = (DE_REGS_VER << 2) | de->de21040;
1695 
1696     spin_lock_irq(&de->lock);
1697     __de_get_regs(de, data);
1698     spin_unlock_irq(&de->lock);
1699 }
1700 
/* ethtool entry points.  The settings/regs handlers take de->lock
 * internally; the rest are lock-free reads of driver state.
 */
static const struct ethtool_ops de_ethtool_ops = {
    .get_link       = ethtool_op_get_link,
    .get_drvinfo        = de_get_drvinfo,
    .get_regs_len       = de_get_regs_len,
    .get_msglevel       = de_get_msglevel,
    .set_msglevel       = de_set_msglevel,
    .get_eeprom     = de_get_eeprom,
    .nway_reset     = de_nway_reset,
    .get_regs       = de_get_regs,
    .get_link_ksettings = de_get_link_ksettings,
    .set_link_ksettings = de_set_link_ksettings,
};
1713 
/* Read the six station-address bytes from the 21040's on-board
 * address ROM.  dr32(ROMCmd) returns a negative value while the next
 * byte is not yet ready, so each byte is fetched with a bounded
 * busy-wait.
 */
static void de21040_get_mac_address(struct de_private *de)
{
    u8 addr[ETH_ALEN];
    unsigned i;

    dw32 (ROMCmd, 0);   /* Reset the pointer with a dummy write. */
    udelay(5);

    for (i = 0; i < 6; i++) {
        int value, boguscnt = 100000;
        do {
            value = dr32(ROMCmd);
            rmb();
        } while (value < 0 && --boguscnt > 0);
        addr[i] = value;    /* truncation keeps the data byte */
        udelay(1);
        if (boguscnt <= 0)
            pr_warn("timeout reading 21040 MAC address byte %u\n",
                i);
    }
    eth_hw_addr_set(de->dev, addr);
}
1736 
/* The 21040 has no SROM to describe its media.  Default to TP,
 * advertise TP half/full duplex plus AUI, and load the fixed
 * CSR13-15 values for each valid media type from the t21040_* tables.
 */
static void de21040_get_media_info(struct de_private *de)
{
    unsigned int i;

    de->media_type = DE_MEDIA_TP;
    de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
                   SUPPORTED_10baseT_Half | SUPPORTED_AUI;
    de->media_advertise = de->media_supported;

    for (i = 0; i < DE_MAX_MEDIA; i++) {
        switch (i) {
        /* media types the 21040 actually supports */
        case DE_MEDIA_AUI:
        case DE_MEDIA_TP:
        case DE_MEDIA_TP_FD:
            de->media[i].type = i;
            de->media[i].csr13 = t21040_csr13[i];
            de->media[i].csr14 = t21040_csr14[i];
            de->media[i].csr15 = t21040_csr15[i];
            break;
        default:
            de->media[i].type = DE_MEDIA_INVALID;
            break;
        }
    }
}
1762 
/* Bit-bang one 16-bit word out of the board's serial EEPROM through
 * the ROMCmd register: select the chip, clock out the read command
 * and @location (@addr_len address bits, 6 or 8), then clock in the
 * 16 data bits.  Each write is followed by a read-back to flush the
 * posted write and pace the clock.
 * Note: this routine returns extra data bits for size detection.
 */
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
                  int addr_len)
{
    int i;
    unsigned retval = 0;
    void __iomem *ee_addr = regs + ROMCmd;
    int read_cmd = location | (EE_READ_CMD << addr_len);

    /* assert chip select */
    writel(EE_ENB & ~EE_CS, ee_addr);
    writel(EE_ENB, ee_addr);

    /* Shift the read command bits out. */
    for (i = 4 + addr_len; i >= 0; i--) {
        short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
        writel(EE_ENB | dataval, ee_addr);
        readl(ee_addr);
        writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
        readl(ee_addr);
        retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
    }
    writel(EE_ENB, ee_addr);
    readl(ee_addr);

    /* clock in the 16 data bits, MSB first */
    for (i = 16; i > 0; i--) {
        writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
        readl(ee_addr);
        retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
        writel(EE_ENB, ee_addr);
        readl(ee_addr);
    }

    /* Terminate the EEPROM access. */
    writel(EE_ENB & ~EE_CS, ee_addr);
    return retval;
}
1799 
1800 static void de21041_get_srom_info(struct de_private *de)
1801 {
1802     unsigned i, sa_offset = 0, ofs;
1803     u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1804     unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1805     struct de_srom_info_leaf *il;
1806     void *bufp;
1807 
1808     /* download entire eeprom */
1809     for (i = 0; i < DE_EEPROM_WORDS; i++)
1810         ((__le16 *)ee_data)[i] =
1811             cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1812 
1813     /* DEC now has a specification but early board makers
1814        just put the address in the first EEPROM locations. */
1815     /* This does  memcmp(eedata, eedata+16, 8) */
1816 
1817 #ifndef CONFIG_MIPS_COBALT
1818 
1819     for (i = 0; i < 8; i ++)
1820         if (ee_data[i] != ee_data[16+i])
1821             sa_offset = 20;
1822 
1823 #endif
1824 
1825     /* store MAC address */
1826     eth_hw_addr_set(de->dev, &ee_data[sa_offset]);
1827 
1828     /* get offset of controller 0 info leaf.  ignore 2nd byte. */
1829     ofs = ee_data[SROMC0InfoLeaf];
1830     if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1831         goto bad_srom;
1832 
1833     /* get pointer to info leaf */
1834     il = (struct de_srom_info_leaf *) &ee_data[ofs];
1835 
1836     /* paranoia checks */
1837     if (il->n_blocks == 0)
1838         goto bad_srom;
1839     if ((sizeof(ee_data) - ofs) <
1840         (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1841         goto bad_srom;
1842 
1843     /* get default media type */
1844     switch (get_unaligned(&il->default_media)) {
1845     case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
1846     case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
1847     case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
1848     default: de->media_type = DE_MEDIA_TP_AUTO; break;
1849     }
1850 
1851     if (netif_msg_probe(de))
1852         pr_info("de%d: SROM leaf offset %u, default media %s\n",
1853                de->board_idx, ofs, media_name[de->media_type]);
1854 
1855     /* init SIA register values to defaults */
1856     for (i = 0; i < DE_MAX_MEDIA; i++) {
1857         de->media[i].type = DE_MEDIA_INVALID;
1858         de->media[i].csr13 = 0xffff;
1859         de->media[i].csr14 = 0xffff;
1860         de->media[i].csr15 = 0xffff;
1861     }
1862 
1863     /* parse media blocks to see what medias are supported,
1864      * and if any custom CSR values are provided
1865      */
1866     bufp = ((void *)il) + sizeof(*il);
1867     for (i = 0; i < il->n_blocks; i++) {
1868         struct de_srom_media_block *ib = bufp;
1869         unsigned idx;
1870 
1871         /* index based on media type in media block */
1872         switch(ib->opts & MediaBlockMask) {
1873         case 0: /* 10baseT */
1874             de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1875                       | SUPPORTED_Autoneg;
1876             idx = DE_MEDIA_TP;
1877             de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1878             break;
1879         case 1: /* BNC */
1880             de->media_supported |= SUPPORTED_BNC;
1881             idx = DE_MEDIA_BNC;
1882             break;
1883         case 2: /* AUI */
1884             de->media_supported |= SUPPORTED_AUI;
1885             idx = DE_MEDIA_AUI;
1886             break;
1887         case 4: /* 10baseT-FD */
1888             de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1889                       | SUPPORTED_Autoneg;
1890             idx = DE_MEDIA_TP_FD;
1891             de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1892             break;
1893         default:
1894             goto bad_srom;
1895         }
1896 
1897         de->media[idx].type = idx;
1898 
1899         if (netif_msg_probe(de))
1900             pr_info("de%d:   media block #%u: %s",
1901                 de->board_idx, i,
1902                 media_name[de->media[idx].type]);
1903 
1904         bufp += sizeof (ib->opts);
1905 
1906         if (ib->opts & MediaCustomCSRs) {
1907             de->media[idx].csr13 = get_unaligned(&ib->csr13);
1908             de->media[idx].csr14 = get_unaligned(&ib->csr14);
1909             de->media[idx].csr15 = get_unaligned(&ib->csr15);
1910             bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1911                 sizeof(ib->csr15);
1912 
1913             if (netif_msg_probe(de))
1914                 pr_cont(" (%x,%x,%x)\n",
1915                     de->media[idx].csr13,
1916                     de->media[idx].csr14,
1917                     de->media[idx].csr15);
1918 
1919         } else {
1920             if (netif_msg_probe(de))
1921                 pr_cont("\n");
1922         }
1923 
1924         if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1925             break;
1926     }
1927 
1928     de->media_advertise = de->media_supported;
1929 
1930 fill_defaults:
1931     /* fill in defaults, for cases where custom CSRs not used */
1932     for (i = 0; i < DE_MAX_MEDIA; i++) {
1933         if (de->media[i].csr13 == 0xffff)
1934             de->media[i].csr13 = t21041_csr13[i];
1935         if (de->media[i].csr14 == 0xffff) {
1936             /* autonegotiation is broken at least on some chip
1937                revisions - rev. 0x21 works, 0x11 does not */
1938             if (de->pdev->revision < 0x20)
1939                 de->media[i].csr14 = t21041_csr14_brk[i];
1940             else
1941                 de->media[i].csr14 = t21041_csr14[i];
1942         }
1943         if (de->media[i].csr15 == 0xffff)
1944             de->media[i].csr15 = t21041_csr15[i];
1945     }
1946 
1947     de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1948 
1949     return;
1950 
1951 bad_srom:
1952     /* for error cases, it's ok to assume we support all these */
1953     for (i = 0; i < DE_MAX_MEDIA; i++)
1954         de->media[i].type = i;
1955     de->media_supported =
1956         SUPPORTED_10baseT_Half |
1957         SUPPORTED_10baseT_Full |
1958         SUPPORTED_Autoneg |
1959         SUPPORTED_TP |
1960         SUPPORTED_AUI |
1961         SUPPORTED_BNC;
1962     goto fill_defaults;
1963 }
1964 
/* net_device callbacks for all de2104x interfaces.  Open/stop/xmit and
 * friends are the driver routines defined earlier in this file; MAC
 * address set/validate use the generic ethernet helpers.
 */
static const struct net_device_ops de_netdev_ops = {
    .ndo_open       = de_open,
    .ndo_stop       = de_close,
    .ndo_set_rx_mode    = de_set_rx_mode,
    .ndo_start_xmit     = de_start_xmit,
    .ndo_get_stats      = de_get_stats,
    .ndo_tx_timeout     = de_tx_timeout,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};
1975 
/* PCI probe for one 21040/21041 board: allocate the net_device, enable
 * and reserve the PCI resources, map the CSR register window, reset the
 * MAC, read the station address and media capabilities, and register
 * the interface.  Returns 0 on success or a negative errno; on failure
 * everything acquired so far is unwound via the goto ladder at the end.
 */
static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev;
    struct de_private *de;
    int rc;
    void __iomem *regs;
    unsigned long pciaddr;
    static int board_idx = -1;  /* counts boards across all probe calls */

    board_idx++;

    /* allocate a new ethernet device structure, and fill in defaults */
    dev = alloc_etherdev(sizeof(struct de_private));
    if (!dev)
        return -ENOMEM;

    dev->netdev_ops = &de_netdev_ops;
    SET_NETDEV_DEV(dev, &pdev->dev);
    dev->ethtool_ops = &de_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    de = netdev_priv(dev);
    /* driver_data 0 in the PCI ID table marks the 21040 chip */
    de->de21040 = ent->driver_data == 0 ? 1 : 0;
    de->pdev = pdev;
    de->dev = dev;
    de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
    de->board_idx = board_idx;
    spin_lock_init (&de->lock);
    /* each chip generation has its own media-poll timer routine */
    timer_setup(&de->media_timer,
            de->de21040 ? de21040_media_timer : de21041_media_timer,
            0);

    netif_carrier_off(dev);

    /* wake up device, assign resources */
    rc = pci_enable_device(pdev);
    if (rc)
        goto err_out_free;

    /* reserve PCI resources to ensure driver atomicity */
    rc = pci_request_regions(pdev, DRV_NAME);
    if (rc)
        goto err_out_disable;

    /* check for invalid IRQ value (IRQs 0 and 1 are never usable) */
    if (pdev->irq < 2) {
        rc = -EIO;
        pr_err("invalid irq (%d) for pci dev %s\n",
               pdev->irq, pci_name(pdev));
        goto err_out_res;
    }

    /* obtain and check validity of PCI I/O address (BAR 1 = MMIO) */
    pciaddr = pci_resource_start(pdev, 1);
    if (!pciaddr) {
        rc = -EIO;
        pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
        goto err_out_res;
    }
    if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
        rc = -EIO;
        pr_err("MMIO resource (%llx) too small on pci dev %s\n",
               (unsigned long long)pci_resource_len(pdev, 1),
               pci_name(pdev));
        goto err_out_res;
    }

    /* remap CSR registers */
    regs = ioremap(pciaddr, DE_REGS_SIZE);
    if (!regs) {
        rc = -EIO;
        pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
               (unsigned long long)pci_resource_len(pdev, 1),
               pciaddr, pci_name(pdev));
        goto err_out_res;
    }
    de->regs = regs;

    de_adapter_wake(de);

    /* make sure hardware is not running */
    rc = de_reset_mac(de);
    if (rc) {
        pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
        goto err_out_iomap;
    }

    /* get MAC address, initialize default media type and
     * get list of supported media
     */
    if (de->de21040) {
        de21040_get_mac_address(de);
        de21040_get_media_info(de);
    } else {
        de21041_get_srom_info(de);
    }

    /* register new network interface with kernel */
    rc = register_netdev(dev);
    if (rc)
        goto err_out_iomap;

    /* print info about board and interface just registered */
    netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
            de->de21040 ? "21040" : "21041",
            regs, dev->dev_addr, pdev->irq);

    pci_set_drvdata(pdev, dev);

    /* enable busmastering */
    pci_set_master(pdev);

    /* put adapter to sleep; de_open wakes it again */
    de_adapter_sleep(de);

    return 0;

err_out_iomap:
    kfree(de->ee_data);  /* NULL (no-op) unless the SROM was duplicated */
    iounmap(regs);
err_out_res:
    pci_release_regions(pdev);
err_out_disable:
    pci_disable_device(pdev);
err_out_free:
    free_netdev(dev);
    return rc;
}
2104 
2105 static void de_remove_one(struct pci_dev *pdev)
2106 {
2107     struct net_device *dev = pci_get_drvdata(pdev);
2108     struct de_private *de = netdev_priv(dev);
2109 
2110     BUG_ON(!dev);
2111     unregister_netdev(dev);
2112     kfree(de->ee_data);
2113     iounmap(de->regs);
2114     pci_release_regions(pdev);
2115     pci_disable_device(pdev);
2116     free_netdev(dev);
2117 }
2118 
/* PM suspend hook.  If the interface is up: stop the media-poll timer,
 * quiesce the hardware under the driver lock with the IRQ masked,
 * detach the device from the network stack, pull the final error
 * counters, tear down the DMA rings and put the chip into its
 * low-power state.  If the interface is down, only detach it.
 * Always returns 0.
 */
static int __maybe_unused de_suspend(struct device *dev_d)
{
    struct pci_dev *pdev = to_pci_dev(dev_d);
    struct net_device *dev = pci_get_drvdata(pdev);
    struct de_private *de = netdev_priv(dev);

    rtnl_lock();
    if (netif_running (dev)) {
        const int irq = pdev->irq;

        del_timer_sync(&de->media_timer);

        /* keep the ISR out while the hardware is being stopped */
        disable_irq(irq);
        spin_lock_irq(&de->lock);

        de_stop_hw(de);
        netif_stop_queue(dev);
        netif_device_detach(dev);
        netif_carrier_off(dev);

        spin_unlock_irq(&de->lock);
        enable_irq(irq);

        /* Update the error counts. */
        __de_get_stats(de);

        /* ensure any in-flight interrupt handler has finished
         * before the rings are freed */
        synchronize_irq(irq);
        de_clean_rings(de);

        de_adapter_sleep(de);
    } else {
        netif_device_detach(dev);
    }
    rtnl_unlock();
    return 0;
}
2155 
2156 static int __maybe_unused de_resume(struct device *dev_d)
2157 {
2158     struct pci_dev *pdev = to_pci_dev(dev_d);
2159     struct net_device *dev = pci_get_drvdata(pdev);
2160     struct de_private *de = netdev_priv(dev);
2161 
2162     rtnl_lock();
2163     if (netif_device_present(dev))
2164         goto out;
2165     if (!netif_running(dev))
2166         goto out_attach;
2167     pci_set_master(pdev);
2168     de_init_rings(de);
2169     de_init_hw(de);
2170 out_attach:
2171     netif_device_attach(dev);
2172 out:
2173     rtnl_unlock();
2174     return 0;
2175 }
2176 
/* Bundle the suspend/resume hooks into a dev_pm_ops for the PCI core. */
static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
2178 
/* System shutdown/reboot hook: close the interface cleanly so the
 * hardware is quiesced before the machine goes down.
 */
static void de_shutdown(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);

    rtnl_lock();
    dev_close(ndev);
    rtnl_unlock();
}
2187 
/* PCI driver glue: bind to the device IDs in de_pci_tbl and route
 * probe, remove, shutdown and power-management events to the handlers
 * defined above.
 */
static struct pci_driver de_driver = {
    .name       = DRV_NAME,
    .id_table   = de_pci_tbl,
    .probe      = de_init_one,
    .remove     = de_remove_one,
    .shutdown   = de_shutdown,
    .driver.pm  = &de_pm_ops,
};

/* expands to module init/exit that register/unregister de_driver */
module_pci_driver(de_driver);