/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
    Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

    Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
    Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
    Copyright 2001 Manfred Spraul                   [natsemi.c]
    Copyright 1999-2001 by Donald Becker.               [natsemi.c]
    Written 1997-2001 by Donald Becker.             [8139too.c]
    Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    See the file COPYING in this distribution for more information.

    Contributors:

        Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
        PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
        LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

    TODO:
    * Test Tx checksumming thoroughly

    Low priority TODO:
    * Complete reset on PciErr
    * Consider Rx interrupt mitigation using TimerIntr
    * Investigate using skb->priority with h/w VLAN priority
    * Investigate using High Priority Tx Queue with skb->priority
    * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
    * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
    * Implement Tx software interrupt mitigation via
      Tx descriptor bit
    * The real minimum of CP_MIN_MTU is 4 bytes.  However,
      for this to be supported, one must(?) turn on packet padding.
    * Support external MII transceivers (patch available)

    NOTES:
    * TX checksumming is considered experimental.  It is off by
      default, use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "8139cp"
#define DRV_VERSION     "1.3"
#define DRV_RELDATE     "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
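
/*
 * For illustration: the perfect-filter path in __cp_set_rx_mode() below
 * hashes each address with the Ethernet CRC and uses the top 6 bits to
 * select one of the 64 filter bits:
 *
 *     int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;   // 0..63
 *     mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);       // MAR0/MAR4
 *
 * e.g. bit_nr 40 sets bit 8 of mc_filter[1], i.e. bit 8 of MAR4.
 */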

#define CP_DEF_MSG_ENABLE   (NETIF_MSG_DRV      | \
                 NETIF_MSG_PROBE    | \
                 NETIF_MSG_LINK)
#define CP_NUM_STATS        14  /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE       64  /* size in bytes of DMA stats block */
#define CP_REGS_SIZE        (0xff + 1)
#define CP_REGS_VER     1       /* version 1 */
#define CP_RX_RING_SIZE     64
#define CP_TX_RING_SIZE     64
#define CP_RING_BYTES       \
        ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
         (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
         CP_STATS_SIZE)
#define NEXT_TX(N)      (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)      (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                  \
    (((CP)->tx_tail <= (CP)->tx_head) ?         \
      (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :   \
      (CP)->tx_tail - (CP)->tx_head - 1)
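
/*
 * Worked example (illustrative): TX_BUFFS_AVAIL keeps one slot unused so
 * that a full ring can be distinguished from an empty one.  With
 * tx_head == tx_tail the ring is empty and 63 of the 64 slots are free;
 * with tx_head == 10 and tx_tail == 5 it yields 5 + 63 - 10 = 58.
 */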

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY     32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH      5   /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST        4   /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST        6   /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH     256 /* Early Tx threshold, in bytes */
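
/*
 * Worked example (illustrative): a field value v encodes 16 << v bytes,
 * i.e. log2(bytes) - 4.  So RX_FIFO_THRESH 5 -> 512 bytes, RX_DMA_BURST
 * 4 -> 256 bytes and TX_DMA_BURST 6 -> 1024 bytes, matching the comments
 * above.
 */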

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU      60  /* TODO: allow lower, but pad */
#define CP_MAX_MTU      4096

enum {
    /* NIC register offsets */
    MAC0        = 0x00, /* Ethernet hardware address. */
    MAR0        = 0x08, /* Multicast filter. */
    StatsAddr   = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
    TxRingAddr  = 0x20, /* 64-bit start addr of Tx ring */
    HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
    Cmd     = 0x37, /* Command register */
    IntrMask    = 0x3C, /* Interrupt mask */
    IntrStatus  = 0x3E, /* Interrupt status */
    TxConfig    = 0x40, /* Tx configuration */
    ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
    RxConfig    = 0x44, /* Rx configuration */
    RxMissed    = 0x4C, /* 24 bits valid, write clears */
    Cfg9346     = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
    Config1     = 0x52, /* Config1 */
    Config3     = 0x59, /* Config3 */
    Config4     = 0x5A, /* Config4 */
    MultiIntr   = 0x5C, /* Multiple interrupt select */
    BasicModeCtrl   = 0x62, /* MII BMCR */
    BasicModeStatus = 0x64, /* MII BMSR */
    NWayAdvert  = 0x66, /* MII ADVERTISE */
    NWayLPAR    = 0x68, /* MII LPA */
    NWayExpansion   = 0x6A, /* MII Expansion */
    TxDmaOkLowDesc  = 0x82, /* Low 16 bit address of a Tx descriptor. */
    Config5     = 0xD8, /* Config5 */
    TxPoll      = 0xD9, /* Tell chip to check Tx descriptors for work */
    RxMaxSize   = 0xDA, /* Max size of an Rx packet (8169 only) */
    CpCmd       = 0xE0, /* C+ Command register (C+ mode only) */
    IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
    RxRingAddr  = 0xE4, /* 64-bit start addr of Rx ring */
    TxThresh    = 0xEC, /* Early Tx threshold */
    OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
    OldTSD0     = 0x10, /* DMA address of first Tx desc (C mode) */

    /* Tx and Rx status descriptors */
    DescOwn     = (1 << 31), /* Descriptor is owned by NIC */
    RingEnd     = (1 << 30), /* End of descriptor ring */
    FirstFrag   = (1 << 29), /* First segment of a packet */
    LastFrag    = (1 << 28), /* Final segment of a packet */
    LargeSend   = (1 << 27), /* TCP Large Send Offload (TSO) */
    MSSShift    = 16,        /* MSS value position */
    MSSMask     = 0x7ff,     /* MSS value: 11 bits */
    TxError     = (1 << 23), /* Tx error summary */
    RxError     = (1 << 20), /* Rx error summary */
    IPCS        = (1 << 18), /* Calculate IP checksum */
    UDPCS       = (1 << 17), /* Calculate UDP/IP checksum */
    TCPCS       = (1 << 16), /* Calculate TCP/IP checksum */
    TxVlanTag   = (1 << 17), /* Add VLAN tag */
    RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
    IPFail      = (1 << 15), /* IP checksum failed */
    UDPFail     = (1 << 14), /* UDP/IP checksum failed */
    TCPFail     = (1 << 13), /* TCP/IP checksum failed */
    NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
    PID1        = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
    PID0        = (1 << 16), /* 1==TCP/IP, 2==UDP/IP, 3==IP (see RxProto* below) */
    RxProtoTCP  = 1,
    RxProtoUDP  = 2,
    RxProtoIP   = 3,
    TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
    TxOWC       = (1 << 22), /* Tx Out-of-window collision */
    TxLinkFail  = (1 << 21), /* Link failed during Tx of packet */
    TxMaxCol    = (1 << 20), /* Tx aborted due to excessive collisions */
    TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
    TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
    RxErrFrame  = (1 << 27), /* Rx frame alignment error */
    RxMcast     = (1 << 26), /* Rx multicast packet rcv'd */
    RxErrCRC    = (1 << 18), /* Rx CRC error */
    RxErrRunt   = (1 << 19), /* Rx error, packet < 64 bytes */
    RxErrLong   = (1 << 21), /* Rx error, packet > 4096 bytes */
    RxErrFIFO   = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

    /* StatsAddr register */
    DumpStats   = (1 << 3),  /* Begin stats dump */

    /* RxConfig register */
    RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
    RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
    AcceptErr   = 0x20,      /* Accept packets with CRC errors */
    AcceptRunt  = 0x10,      /* Accept runt (<64 bytes) packets */
    AcceptBroadcast = 0x08,      /* Accept broadcast packets */
    AcceptMulticast = 0x04,      /* Accept multicast packets */
    AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
    AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

    /* IntrMask / IntrStatus registers */
    PciErr      = (1 << 15), /* System error on the PCI bus */
    TimerIntr   = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
    LenChg      = (1 << 13), /* Cable length change */
    SWInt       = (1 << 8),  /* Software-requested interrupt */
    TxEmpty     = (1 << 7),  /* No Tx descriptors available */
    RxFIFOOvr   = (1 << 6),  /* Rx FIFO Overflow */
    LinkChg     = (1 << 5),  /* Packet underrun, or link change */
    RxEmpty     = (1 << 4),  /* No Rx descriptors available */
    TxErr       = (1 << 3),  /* Tx error */
    TxOK        = (1 << 2),  /* Tx packet sent */
    RxErr       = (1 << 1),  /* Rx error */
    RxOK        = (1 << 0),  /* Rx packet received */
    IntrResvd   = (1 << 10), /* reserved, according to RealTek engineers,
                    but hardware likes to raise it */

    IntrAll     = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
              RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
              RxErr | RxOK | IntrResvd,

    /* C mode command register */
    CmdReset    = (1 << 4),  /* Enable to reset; self-clearing */
    RxOn        = (1 << 3),  /* Rx mode enable */
    TxOn        = (1 << 2),  /* Tx mode enable */

    /* C+ mode command register */
    RxVlanOn    = (1 << 6),  /* Rx VLAN de-tagging enable */
    RxChkSum    = (1 << 5),  /* Rx checksum offload enable */
    PCIDAC      = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
    PCIMulRW    = (1 << 3),  /* Enable PCI read/write multiple */
    CpRxOn      = (1 << 1),  /* Rx mode enable */
    CpTxOn      = (1 << 0),  /* Tx mode enable */

    /* Cfg9346 EEPROM control register */
    Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
    Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

    /* TxConfig register */
    IFG     = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
    TxDMAShift  = 8,         /* DMA burst value (0-7) is shift this many bits */

    /* Early Tx Threshold register */
    TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
    TxThreshMax = 2048,      /* Max early Tx threshold */

    /* Config1 register */
    DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
    LWACT           = (1 << 4),  /* LWAKE active mode */
    PMEnable    = (1 << 0),  /* Enable various PM features of chip */

    /* Config3 register */
    PARMEnable  = (1 << 6),  /* Enable auto-loading of PHY parms */
    MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
    LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

    /* Config4 register */
    LWPTN           = (1 << 1),  /* LWAKE Pattern */
    LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

    /* Config5 register */
    BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
    MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
    UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
    LANWake         = (1 << 1),  /* Enable LANWake signal */
    PMEStatus   = (1 << 0),  /* PME status can be reset by PCI RST# */

    cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
    cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
    cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
      (RX_FIFO_THRESH << RxCfgFIFOShift) |
      (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
    __le32      opts1;
    __le32      opts2;
    __le64      addr;
};

struct cp_dma_stats {
    __le64          tx_ok;
    __le64          rx_ok;
    __le64          tx_err;
    __le32          rx_err;
    __le16          rx_fifo;
    __le16          frame_align;
    __le32          tx_ok_1col;
    __le32          tx_ok_mcol;
    __le64          rx_ok_phys;
    __le64          rx_ok_bcast;
    __le32          rx_ok_mcast;
    __le16          tx_abort;
    __le16          tx_underrun;
} __packed;
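
/*
 * Layout note (illustrative): the packed struct above is exactly 64 bytes
 * (3*8 + 4 + 2 + 2 + 4 + 4 + 2*8 + 4 + 2 + 2), matching CP_STATS_SIZE,
 * the size of the block the chip DMAs into during a stats dump (see
 * cp_get_ethtool_stats() below).
 */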

struct cp_extra_stats {
    unsigned long       rx_frags;
};

struct cp_private {
    void            __iomem *regs;
    struct net_device   *dev;
    spinlock_t      lock;
    u32         msg_enable;

    struct napi_struct  napi;

    struct pci_dev      *pdev;
    u32         rx_config;
    u16         cpcmd;

    struct cp_extra_stats   cp_stats;

    unsigned        rx_head     ____cacheline_aligned;
    unsigned        rx_tail;
    struct cp_desc      *rx_ring;
    struct sk_buff      *rx_skb[CP_RX_RING_SIZE];

    unsigned        tx_head     ____cacheline_aligned;
    unsigned        tx_tail;
    struct cp_desc      *tx_ring;
    struct sk_buff      *tx_skb[CP_TX_RING_SIZE];
    u32         tx_opts[CP_TX_RING_SIZE];

    unsigned        rx_buf_sz;
    unsigned        wol_enabled : 1; /* Is Wake-on-LAN enabled? */

    dma_addr_t      ring_dma;

    struct mii_if_info  mii_if;
};

#define cpr8(reg)   readb(cp->regs + (reg))
#define cpr16(reg)  readw(cp->regs + (reg))
#define cpr32(reg)  readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {            \
    writeb((val), cp->regs + (reg));    \
    readb(cp->regs + (reg));        \
    } while (0)
#define cpw16_f(reg,val) do {           \
    writew((val), cp->regs + (reg));    \
    readw(cp->regs + (reg));        \
    } while (0)
#define cpw32_f(reg,val) do {           \
    writel((val), cp->regs + (reg));    \
    readl(cp->regs + (reg));        \
    } while (0)
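
/*
 * The _f ("flush") variants read the register back after writing it.
 * PCI writes are posted and may linger in bridge buffers; a read from
 * the same device forces them to complete, which matters for ordering-
 * sensitive sequences such as masking interrupts before returning.
 */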


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
             struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
             struct ethtool_eeprom *eeprom, u8 *data);

static struct {
    const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
    { "tx_ok" },
    { "rx_ok" },
    { "tx_err" },
    { "rx_err" },
    { "rx_fifo" },
    { "frame_align" },
    { "tx_ok_1col" },
    { "tx_ok_mcol" },
    { "rx_ok_phys" },
    { "rx_ok_bcast" },
    { "rx_ok_mcast" },
    { "tx_abort" },
    { "tx_underrun" },
    { "rx_frags" },
};


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
    unsigned int mtu = cp->dev->mtu;

    if (mtu > ETH_DATA_LEN)
        /* MTU + ethernet header + FCS + optional VLAN tag */
        cp->rx_buf_sz = mtu + ETH_HLEN + 8;
    else
        cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                  struct cp_desc *desc)
{
    u32 opts2 = le32_to_cpu(desc->opts2);

    skb->protocol = eth_type_trans (skb, cp->dev);

    cp->dev->stats.rx_packets++;
    cp->dev->stats.rx_bytes += skb->len;

    if (opts2 & RxVlanTagged)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));

    napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                u32 status, u32 len)
{
    netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
          rx_tail, status, len);
    cp->dev->stats.rx_errors++;
    if (status & RxErrFrame)
        cp->dev->stats.rx_frame_errors++;
    if (status & RxErrCRC)
        cp->dev->stats.rx_crc_errors++;
    if ((status & RxErrRunt) || (status & RxErrLong))
        cp->dev->stats.rx_length_errors++;
    if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
        cp->dev->stats.rx_length_errors++;
    if (status & RxErrFIFO)
        cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
    unsigned int protocol = (status >> 16) & 0x3;

    if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
        ((protocol == RxProtoUDP) && !(status & UDPFail)))
        return 1;
    else
        return 0;
}
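
/*
 * Illustrative decode: (status >> 16) & 0x3 extracts the PID1:PID0 bits,
 * so 0 means non-IP, RxProtoTCP (1) TCP/IP, RxProtoUDP (2) UDP/IP and
 * RxProtoIP (3) IP only; the hardware checksum is trusted only for TCP
 * and UDP frames whose corresponding Fail bit is clear.
 */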

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
    struct cp_private *cp = container_of(napi, struct cp_private, napi);
    struct net_device *dev = cp->dev;
    unsigned int rx_tail = cp->rx_tail;
    int rx = 0;

    cpw16(IntrStatus, cp_rx_intr_mask);

    while (rx < budget) {
        u32 status, len;
        dma_addr_t mapping, new_mapping;
        struct sk_buff *skb, *new_skb;
        struct cp_desc *desc;
        const unsigned buflen = cp->rx_buf_sz;

        skb = cp->rx_skb[rx_tail];
        BUG_ON(!skb);

        desc = &cp->rx_ring[rx_tail];
        status = le32_to_cpu(desc->opts1);
        if (status & DescOwn)
            break;

        len = (status & 0x1fff) - 4;
        mapping = le64_to_cpu(desc->addr);

        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
            /* we don't support incoming fragmented frames.
             * instead, we attempt to ensure that the
             * pre-allocated RX skbs are properly sized such
             * that RX fragments are never encountered
             */
            cp_rx_err_acct(cp, rx_tail, status, len);
            dev->stats.rx_dropped++;
            cp->cp_stats.rx_frags++;
            goto rx_next;
        }

        if (status & (RxError | RxErrFIFO)) {
            cp_rx_err_acct(cp, rx_tail, status, len);
            goto rx_next;
        }

        netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
              rx_tail, status, len);

        new_skb = napi_alloc_skb(napi, buflen);
        if (!new_skb) {
            dev->stats.rx_dropped++;
            goto rx_next;
        }

        new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                     DMA_FROM_DEVICE);
        if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
            dev->stats.rx_dropped++;
            kfree_skb(new_skb);
            goto rx_next;
        }

        dma_unmap_single(&cp->pdev->dev, mapping,
                 buflen, DMA_FROM_DEVICE);

        /* Handle checksum offloading for incoming packets. */
        if (cp_rx_csum_ok(status))
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb_checksum_none_assert(skb);

        skb_put(skb, len);

        cp->rx_skb[rx_tail] = new_skb;

        cp_rx_skb(cp, skb, desc);
        rx++;
        mapping = new_mapping;

rx_next:
        cp->rx_ring[rx_tail].opts2 = 0;
        cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
        if (rx_tail == (CP_RX_RING_SIZE - 1))
            desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                          cp->rx_buf_sz);
        else
            desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
        rx_tail = NEXT_RX(rx_tail);
    }

    cp->rx_tail = rx_tail;

    /* if we did not reach work limit, then we're done with
     * this round of polling
     */
    if (rx < budget && napi_complete_done(napi, rx)) {
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        cpw16_f(IntrMask, cp_intr_mask);
        spin_unlock_irqrestore(&cp->lock, flags);
    }

    return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct cp_private *cp;
    int handled = 0;
    u16 status;
    u16 mask;

    if (unlikely(dev == NULL))
        return IRQ_NONE;
    cp = netdev_priv(dev);

    spin_lock(&cp->lock);

    mask = cpr16(IntrMask);
    if (!mask)
        goto out_unlock;

    status = cpr16(IntrStatus);
    if (!status || (status == 0xFFFF))
        goto out_unlock;

    handled = 1;

    netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
          status, cpr8(Cmd), cpr16(CpCmd));

    cpw16(IntrStatus, status & ~cp_rx_intr_mask);

    /* close possible races with dev_close */
    if (unlikely(!netif_running(dev))) {
        cpw16(IntrMask, 0);
        goto out_unlock;
    }

    if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
        if (napi_schedule_prep(&cp->napi)) {
            cpw16_f(IntrMask, cp_norx_intr_mask);
            __napi_schedule(&cp->napi);
        }

    if (status & (TxOK | TxErr | TxEmpty | SWInt))
        cp_tx(cp);
    if (status & LinkChg)
        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

    if (status & PciErr) {
        u16 pci_status;

        pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
        pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
        netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
               status, pci_status);

        /* TODO: reset hardware */
    }

out_unlock:
    spin_unlock(&cp->lock);

    return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
    struct cp_private *cp = netdev_priv(dev);
    const int irq = cp->pdev->irq;

    disable_irq(irq);
    cp_interrupt(irq, dev);
    enable_irq(irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
    unsigned tx_head = cp->tx_head;
    unsigned tx_tail = cp->tx_tail;
    unsigned bytes_compl = 0, pkts_compl = 0;

    while (tx_tail != tx_head) {
        struct cp_desc *txd = cp->tx_ring + tx_tail;
        struct sk_buff *skb;
        u32 status;

        rmb();
        status = le32_to_cpu(txd->opts1);
        if (status & DescOwn)
            break;

        skb = cp->tx_skb[tx_tail];
        BUG_ON(!skb);

        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                 cp->tx_opts[tx_tail] & 0xffff,
                 DMA_TO_DEVICE);

        if (status & LastFrag) {
            if (status & (TxError | TxFIFOUnder)) {
                netif_dbg(cp, tx_err, cp->dev,
                      "tx err, status 0x%x\n", status);
                cp->dev->stats.tx_errors++;
                if (status & TxOWC)
                    cp->dev->stats.tx_window_errors++;
                if (status & TxMaxCol)
                    cp->dev->stats.tx_aborted_errors++;
                if (status & TxLinkFail)
                    cp->dev->stats.tx_carrier_errors++;
                if (status & TxFIFOUnder)
                    cp->dev->stats.tx_fifo_errors++;
            } else {
                cp->dev->stats.collisions +=
                    ((status >> TxColCntShift) & TxColCntMask);
                cp->dev->stats.tx_packets++;
                cp->dev->stats.tx_bytes += skb->len;
                netif_dbg(cp, tx_done, cp->dev,
                      "tx done, slot %d\n", tx_tail);
            }
            bytes_compl += skb->len;
            pkts_compl++;
            dev_consume_skb_irq(skb);
        }

        cp->tx_skb[tx_tail] = NULL;

        tx_tail = NEXT_TX(tx_tail);
    }

    cp->tx_tail = tx_tail;

    netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
    if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
        netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
    return skb_vlan_tag_present(skb) ?
        TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
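
/*
 * Byte-order note (illustrative): skb_vlan_tag_get() returns the tag in
 * host order, but the chip expects it byte-swapped within the
 * little-endian opts2 word, hence the swab16() here; the Rx path in
 * cp_rx_skb() applies the same swab16() to undo it.
 */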

static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
                   int first, int entry_last)
{
    int frag, index;
    struct cp_desc *txd;
    skb_frag_t *this_frag;

    for (frag = 0; frag+first < entry_last; frag++) {
        index = first+frag;
        cp->tx_skb[index] = NULL;
        txd = &cp->tx_ring[index];
        this_frag = &skb_shinfo(skb)->frags[frag];
        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                 skb_frag_size(this_frag), DMA_TO_DEVICE);
    }
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                    struct net_device *dev)
{
    struct cp_private *cp = netdev_priv(dev);
    unsigned entry;
    u32 eor, opts1;
    unsigned long intr_flags;
    __le32 opts2;
    int mss = 0;

    spin_lock_irqsave(&cp->lock, intr_flags);

    /* This is a hard error, log it. */
    if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&cp->lock, intr_flags);
        netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
        return NETDEV_TX_BUSY;
    }

    entry = cp->tx_head;
    eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
    mss = skb_shinfo(skb)->gso_size;

    if (mss > MSSMask) {
        netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
                 mss);
        goto out_dma_error;
    }

    opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
    opts1 = DescOwn;
    if (mss)
        opts1 |= LargeSend | (mss << MSSShift);
    else if (skb->ip_summed == CHECKSUM_PARTIAL) {
        const struct iphdr *ip = ip_hdr(skb);
        if (ip->protocol == IPPROTO_TCP)
            opts1 |= IPCS | TCPCS;
        else if (ip->protocol == IPPROTO_UDP)
            opts1 |= IPCS | UDPCS;
        else {
            WARN_ONCE(1,
                  "Net bug: asked to checksum invalid Legacy IP packet\n");
            goto out_dma_error;
        }
    }

    if (skb_shinfo(skb)->nr_frags == 0) {
        struct cp_desc *txd = &cp->tx_ring[entry];
        u32 len;
        dma_addr_t mapping;

        len = skb->len;
        mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&cp->pdev->dev, mapping))
            goto out_dma_error;

        txd->opts2 = opts2;
        txd->addr = cpu_to_le64(mapping);
        wmb();

        opts1 |= eor | len | FirstFrag | LastFrag;

        txd->opts1 = cpu_to_le32(opts1);
        wmb();

        cp->tx_skb[entry] = skb;
        cp->tx_opts[entry] = opts1;
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
              entry, skb->len);
    } else {
        struct cp_desc *txd;
        u32 first_len, first_eor, ctrl;
        dma_addr_t first_mapping;
        int frag, first_entry = entry;

        /* We must give this initial chunk to the device last.
         * Otherwise we could race with the device: once DescOwn is set
         * on the first descriptor, the NIC may start fetching the chain,
         * so all later fragments must already be in place.
         */
        first_eor = eor;
        first_len = skb_headlen(skb);
        first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                           first_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&cp->pdev->dev, first_mapping))
            goto out_dma_error;

        cp->tx_skb[entry] = skb;

        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
            const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
            u32 len;
            dma_addr_t mapping;

            entry = NEXT_TX(entry);

            len = skb_frag_size(this_frag);
            mapping = dma_map_single(&cp->pdev->dev,
                         skb_frag_address(this_frag),
                         len, DMA_TO_DEVICE);
            if (dma_mapping_error(&cp->pdev->dev, mapping)) {
                unwind_tx_frag_mapping(cp, skb, first_entry, entry);
                goto out_dma_error;
            }

            eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

            ctrl = opts1 | eor | len;

            if (frag == skb_shinfo(skb)->nr_frags - 1)
                ctrl |= LastFrag;

            txd = &cp->tx_ring[entry];
            txd->opts2 = opts2;
            txd->addr = cpu_to_le64(mapping);
            wmb();

            txd->opts1 = cpu_to_le32(ctrl);
            wmb();

            cp->tx_opts[entry] = ctrl;
            cp->tx_skb[entry] = skb;
        }

        txd = &cp->tx_ring[first_entry];
        txd->opts2 = opts2;
        txd->addr = cpu_to_le64(first_mapping);
        wmb();

        ctrl = opts1 | first_eor | first_len | FirstFrag;
        txd->opts1 = cpu_to_le32(ctrl);
        wmb();

        cp->tx_opts[first_entry] = ctrl;
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
              first_entry, entry, skb->len);
    }
    cp->tx_head = NEXT_TX(entry);

    netdev_sent_queue(dev, skb->len);
    if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
        netif_stop_queue(dev);

out_unlock:
    spin_unlock_irqrestore(&cp->lock, intr_flags);

    cpw8(TxPoll, NormalTxPoll);

    return NETDEV_TX_OK;
out_dma_error:
    dev_kfree_skb_any(skb);
    cp->dev->stats.tx_dropped++;
    goto out_unlock;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
    struct cp_private *cp = netdev_priv(dev);
    u32 mc_filter[2];   /* Multicast hash filter */
    int rx_mode;

    /* Note: do not reorder, GCC is clever about common statements. */
    if (dev->flags & IFF_PROMISC) {
        /* Unconditionally log net taps. */
        rx_mode =
            AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
            AcceptAllPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
           (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter perfectly -- accept all multicasts. */
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0xffffffff;
    } else {
        struct netdev_hw_addr *ha;
        rx_mode = AcceptBroadcast | AcceptMyPhys;
        mc_filter[1] = mc_filter[0] = 0;
        netdev_for_each_mc_addr(ha, dev) {
            int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
            rx_mode |= AcceptMulticast;
        }
    }

    /* We can safely update without stopping the chip. */
    cp->rx_config = cp_rx_config | rx_mode;
    cpw32_f(RxConfig, cp->rx_config);

    cpw32_f (MAR0 + 0, mc_filter[0]);
    cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
    unsigned long flags;
    struct cp_private *cp = netdev_priv(dev);

    spin_lock_irqsave (&cp->lock, flags);
    __cp_set_rx_mode(dev);
    spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
    /* only lower 24 bits valid; write any value to clear */
    cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
    cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
    struct cp_private *cp = netdev_priv(dev);
    unsigned long flags;

    /* The chip only needs to report frames it silently dropped. */
    spin_lock_irqsave(&cp->lock, flags);
    if (netif_running(dev) && netif_device_present(dev))
        __cp_get_stats(cp);
    spin_unlock_irqrestore(&cp->lock, flags);

    return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
    cpw16(IntrStatus, ~(cpr16(IntrStatus)));
    cpw16_f(IntrMask, 0);
    cpw8(Cmd, 0);
    cpw16_f(CpCmd, 0);
    cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

    cp->rx_tail = 0;
    cp->tx_head = cp->tx_tail = 0;

    netdev_reset_queue(cp->dev);
}

static void cp_reset_hw (struct cp_private *cp)
{
    unsigned work = 1000;

    cpw8(Cmd, CmdReset);

    while (work--) {
        if (!(cpr8(Cmd) & CmdReset))
            return;

        schedule_timeout_uninterruptible(10);
    }

    netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
    dma_addr_t ring_dma;

    cpw16(CpCmd, cp->cpcmd);

    /*
     * These (at least TxRingAddr) need to be configured after the
     * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
     * (C+ Command Register) recommends that these and more be configured
     * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
     * it's been observed that the TxRingAddr is actually reset to garbage
     * when C+ mode Tx is enabled in CpCmd.
     */
    cpw32_f(HiTxRingAddr, 0);
    cpw32_f(HiTxRingAddr + 4, 0);

    ring_dma = cp->ring_dma;
    cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
    cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

    ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
    cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
    cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
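
    /*
     * Note: the high halves above are written as (ring_dma >> 16) >> 16
     * rather than ring_dma >> 32, so the expression stays well-defined
     * when dma_addr_t is a 32-bit type (shifting a 32-bit value by 32
     * bits is undefined behaviour in C).
     */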
1011 
1012     /*
1013      * Strictly speaking, the datasheet says this should be enabled
1014      * *before* setting the descriptor addresses. But what, then, would
1015      * prevent it from doing DMA to random unconfigured addresses?
1016      * This variant appears to work fine.
1017      */
1018     cpw8(Cmd, RxOn | TxOn);
1019 
1020     netdev_reset_queue(cp->dev);
1021 }
1022 
1023 static void cp_enable_irq(struct cp_private *cp)
1024 {
1025     cpw16_f(IntrMask, cp_intr_mask);
1026 }
1027 
1028 static void cp_init_hw (struct cp_private *cp)
1029 {
1030     struct net_device *dev = cp->dev;
1031 
1032     cp_reset_hw(cp);
1033 
1034     cpw8_f (Cfg9346, Cfg9346_Unlock);
1035 
1036     /* Restore our idea of the MAC address. */
1037     cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1038     cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1039 
1040     cp_start_hw(cp);
1041     cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1042 
1043     __cp_set_rx_mode(dev);
1044     cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1045 
1046     cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1047     /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1048     cpw8(Config3, PARMEnable);
1049     cp->wol_enabled = 0;
1050 
1051     cpw8(Config5, cpr8(Config5) & PMEStatus);
1052 
1053     cpw16(MultiIntr, 0);
1054 
1055     cpw8_f(Cfg9346, Cfg9346_Lock);
1056 }
1057 
1058 static int cp_refill_rx(struct cp_private *cp)
1059 {
1060     struct net_device *dev = cp->dev;
1061     unsigned i;
1062 
1063     for (i = 0; i < CP_RX_RING_SIZE; i++) {
1064         struct sk_buff *skb;
1065         dma_addr_t mapping;
1066 
1067         skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1068         if (!skb)
1069             goto err_out;
1070 
1071         mapping = dma_map_single(&cp->pdev->dev, skb->data,
1072                      cp->rx_buf_sz, DMA_FROM_DEVICE);
1073         if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1074             kfree_skb(skb);
1075             goto err_out;
1076         }
1077         cp->rx_skb[i] = skb;
1078 
1079         cp->rx_ring[i].opts2 = 0;
1080         cp->rx_ring[i].addr = cpu_to_le64(mapping);
1081         if (i == (CP_RX_RING_SIZE - 1))
1082             cp->rx_ring[i].opts1 =
1083                 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1084         else
1085             cp->rx_ring[i].opts1 =
1086                 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1087     }
1088 
1089     return 0;
1090 
1091 err_out:
1092     cp_clean_rings(cp);
1093     return -ENOMEM;
1094 }
1095 
1096 static void cp_init_rings_index (struct cp_private *cp)
1097 {
1098     cp->rx_tail = 0;
1099     cp->tx_head = cp->tx_tail = 0;
1100 }
1101 
1102 static int cp_init_rings (struct cp_private *cp)
1103 {
1104     memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1105     cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1106     memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1107 
1108     cp_init_rings_index(cp);
1109 
1110     return cp_refill_rx (cp);
1111 }
1112 
1113 static int cp_alloc_rings (struct cp_private *cp)
1114 {
1115     struct device *d = &cp->pdev->dev;
1116     void *mem;
1117     int rc;
1118 
1119     mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1120     if (!mem)
1121         return -ENOMEM;
1122 
1123     cp->rx_ring = mem;
1124     cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1125 
1126     rc = cp_init_rings(cp);
1127     if (rc < 0)
1128         dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1129 
1130     return rc;
1131 }
1132 
1133 static void cp_clean_rings (struct cp_private *cp)
1134 {
1135     struct cp_desc *desc;
1136     unsigned i;
1137 
1138     for (i = 0; i < CP_RX_RING_SIZE; i++) {
1139         if (cp->rx_skb[i]) {
1140             desc = cp->rx_ring + i;
1141             dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1142                      cp->rx_buf_sz, DMA_FROM_DEVICE);
1143             dev_kfree_skb_any(cp->rx_skb[i]);
1144         }
1145     }
1146 
1147     for (i = 0; i < CP_TX_RING_SIZE; i++) {
1148         if (cp->tx_skb[i]) {
1149             struct sk_buff *skb = cp->tx_skb[i];
1150 
1151             desc = cp->tx_ring + i;
1152             dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153                      le32_to_cpu(desc->opts1) & 0xffff,
1154                      DMA_TO_DEVICE);
1155             if (le32_to_cpu(desc->opts1) & LastFrag)
1156                 dev_kfree_skb_any(skb);
1157             cp->dev->stats.tx_dropped++;
1158         }
1159     }
1160     netdev_reset_queue(cp->dev);
1161 
1162     memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1163     memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1164     memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1165 
1166     memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1167     memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1168 }
1169 
1170 static void cp_free_rings (struct cp_private *cp)
1171 {
1172     cp_clean_rings(cp);
1173     dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1174               cp->ring_dma);
1175     cp->rx_ring = NULL;
1176     cp->tx_ring = NULL;
1177 }
1178 
1179 static int cp_open (struct net_device *dev)
1180 {
1181     struct cp_private *cp = netdev_priv(dev);
1182     const int irq = cp->pdev->irq;
1183     int rc;
1184 
1185     netif_dbg(cp, ifup, dev, "enabling interface\n");
1186 
1187     rc = cp_alloc_rings(cp);
1188     if (rc)
1189         return rc;
1190 
1191     napi_enable(&cp->napi);
1192 
1193     cp_init_hw(cp);
1194 
1195     rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1196     if (rc)
1197         goto err_out_hw;
1198 
1199     cp_enable_irq(cp);
1200 
1201     netif_carrier_off(dev);
1202     mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1203     netif_start_queue(dev);
1204 
1205     return 0;
1206 
1207 err_out_hw:
1208     napi_disable(&cp->napi);
1209     cp_stop_hw(cp);
1210     cp_free_rings(cp);
1211     return rc;
1212 }
1213 
1214 static int cp_close (struct net_device *dev)
1215 {
1216     struct cp_private *cp = netdev_priv(dev);
1217     unsigned long flags;
1218 
1219     napi_disable(&cp->napi);
1220 
1221     netif_dbg(cp, ifdown, dev, "disabling interface\n");
1222 
1223     spin_lock_irqsave(&cp->lock, flags);
1224 
1225     netif_stop_queue(dev);
1226     netif_carrier_off(dev);
1227 
1228     cp_stop_hw(cp);
1229 
1230     spin_unlock_irqrestore(&cp->lock, flags);
1231 
1232     free_irq(cp->pdev->irq, dev);
1233 
1234     cp_free_rings(cp);
1235     return 0;
1236 }
1237 
1238 static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1239 {
1240     struct cp_private *cp = netdev_priv(dev);
1241     unsigned long flags;
1242     int i;
1243 
1244     netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1245             cpr8(Cmd), cpr16(CpCmd),
1246             cpr16(IntrStatus), cpr16(IntrMask));
1247 
1248     spin_lock_irqsave(&cp->lock, flags);
1249 
1250     netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1251           cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1252     for (i = 0; i < CP_TX_RING_SIZE; i++) {
1253         netif_dbg(cp, tx_err, cp->dev,
1254               "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1255               i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1256               cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1257               le64_to_cpu(cp->tx_ring[i].addr),
1258               cp->tx_skb[i]);
1259     }
1260 
1261     cp_stop_hw(cp);
1262     cp_clean_rings(cp);
1263     cp_init_rings(cp);
1264     cp_start_hw(cp);
1265     __cp_set_rx_mode(dev);
1266     cpw16_f(IntrMask, cp_norx_intr_mask);
1267 
1268     netif_wake_queue(dev);
1269     napi_schedule_irqoff(&cp->napi);
1270 
1271     spin_unlock_irqrestore(&cp->lock, flags);
1272 }
1273 
1274 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1275 {
1276     struct cp_private *cp = netdev_priv(dev);
1277 
1278     /* if network interface not up, no need for complexity */
1279     if (!netif_running(dev)) {
1280         dev->mtu = new_mtu;
1281         cp_set_rxbufsize(cp);   /* set new rx buf size */
1282         return 0;
1283     }
1284 
1285     /* network IS up, close it, reset MTU, and come up again. */
1286     cp_close(dev);
1287     dev->mtu = new_mtu;
1288     cp_set_rxbufsize(cp);
1289     return cp_open(dev);
1290 }
1291 
1292 static const char mii_2_8139_map[8] = {
1293     BasicModeCtrl,
1294     BasicModeStatus,
1295     0,
1296     0,
1297     NWayAdvert,
1298     NWayLPAR,
1299     NWayExpansion,
1300     0
1301 };
1302 
1303 static int mdio_read(struct net_device *dev, int phy_id, int location)
1304 {
1305     struct cp_private *cp = netdev_priv(dev);
1306 
1307     return location < 8 && mii_2_8139_map[location] ?
1308            readw(cp->regs + mii_2_8139_map[location]) : 0;
1309 }
1310 
1311 
1312 static void mdio_write(struct net_device *dev, int phy_id, int location,
1313                int value)
1314 {
1315     struct cp_private *cp = netdev_priv(dev);
1316 
1317     if (location == 0) {
1318         cpw8(Cfg9346, Cfg9346_Unlock);
1319         cpw16(BasicModeCtrl, value);
1320         cpw8(Cfg9346, Cfg9346_Lock);
1321     } else if (location < 8 && mii_2_8139_map[location])
1322         cpw16(mii_2_8139_map[location], value);
1323 }
1324 
1325 /* Set the ethtool Wake-on-LAN settings */
1326 static int netdev_set_wol (struct cp_private *cp,
1327                const struct ethtool_wolinfo *wol)
1328 {
1329     u8 options;
1330 
1331     options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1332     /* If WOL is being disabled, no need for complexity */
1333     if (wol->wolopts) {
1334         if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
1335         if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
1336     }
1337 
1338     cpw8 (Cfg9346, Cfg9346_Unlock);
1339     cpw8 (Config3, options);
1340     cpw8 (Cfg9346, Cfg9346_Lock);
1341 
1342     options = 0; /* Paranoia setting */
1343     options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1344     /* If WOL is being disabled, no need for complexity */
1345     if (wol->wolopts) {
1346         if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1347         if (wol->wolopts & WAKE_BCAST)  options |= BWF;
1348         if (wol->wolopts & WAKE_MCAST)  options |= MWF;
1349     }
1350 
1351     cpw8 (Config5, options);
1352 
1353     cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1354 
1355     return 0;
1356 }
1357 
1358 /* Get the ethtool Wake-on-LAN settings */
1359 static void netdev_get_wol (struct cp_private *cp,
1360                  struct ethtool_wolinfo *wol)
1361 {
1362     u8 options;
1363 
1364     wol->wolopts   = 0; /* Start from scratch */
1365     wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1366                  WAKE_MCAST | WAKE_UCAST;
1367     /* We don't need to go on if WOL is disabled */
1368     if (!cp->wol_enabled) return;
1369 
1370     options        = cpr8 (Config3);
1371     if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1372     if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1373 
1374     options        = 0; /* Paranoia setting */
1375     options        = cpr8 (Config5);
1376     if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1377     if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1378     if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1379 }
1380 
1381 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1382 {
1383     struct cp_private *cp = netdev_priv(dev);
1384 
1385     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1386     strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1387     strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1388 }
1389 
1390 static void cp_get_ringparam(struct net_device *dev,
1391                  struct ethtool_ringparam *ring,
1392                  struct kernel_ethtool_ringparam *kernel_ring,
1393                  struct netlink_ext_ack *extack)
1394 {
1395     ring->rx_max_pending = CP_RX_RING_SIZE;
1396     ring->tx_max_pending = CP_TX_RING_SIZE;
1397     ring->rx_pending = CP_RX_RING_SIZE;
1398     ring->tx_pending = CP_TX_RING_SIZE;
1399 }
1400 
1401 static int cp_get_regs_len(struct net_device *dev)
1402 {
1403     return CP_REGS_SIZE;
1404 }
1405 
1406 static int cp_get_sset_count (struct net_device *dev, int sset)
1407 {
1408     switch (sset) {
1409     case ETH_SS_STATS:
1410         return CP_NUM_STATS;
1411     default:
1412         return -EOPNOTSUPP;
1413     }
1414 }
1415 
1416 static int cp_get_link_ksettings(struct net_device *dev,
1417                  struct ethtool_link_ksettings *cmd)
1418 {
1419     struct cp_private *cp = netdev_priv(dev);
1420     unsigned long flags;
1421 
1422     spin_lock_irqsave(&cp->lock, flags);
1423     mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
1424     spin_unlock_irqrestore(&cp->lock, flags);
1425 
1426     return 0;
1427 }
1428 
1429 static int cp_set_link_ksettings(struct net_device *dev,
1430                  const struct ethtool_link_ksettings *cmd)
1431 {
1432     struct cp_private *cp = netdev_priv(dev);
1433     int rc;
1434     unsigned long flags;
1435 
1436     spin_lock_irqsave(&cp->lock, flags);
1437     rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
1438     spin_unlock_irqrestore(&cp->lock, flags);
1439 
1440     return rc;
1441 }
1442 
1443 static int cp_nway_reset(struct net_device *dev)
1444 {
1445     struct cp_private *cp = netdev_priv(dev);
1446     return mii_nway_restart(&cp->mii_if);
1447 }
1448 
1449 static u32 cp_get_msglevel(struct net_device *dev)
1450 {
1451     struct cp_private *cp = netdev_priv(dev);
1452     return cp->msg_enable;
1453 }
1454 
1455 static void cp_set_msglevel(struct net_device *dev, u32 value)
1456 {
1457     struct cp_private *cp = netdev_priv(dev);
1458     cp->msg_enable = value;
1459 }
1460 
1461 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1462 {
1463     struct cp_private *cp = netdev_priv(dev);
1464     unsigned long flags;
1465 
1466     if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1467         return 0;
1468 
1469     spin_lock_irqsave(&cp->lock, flags);
1470 
1471     if (features & NETIF_F_RXCSUM)
1472         cp->cpcmd |= RxChkSum;
1473     else
1474         cp->cpcmd &= ~RxChkSum;
1475 
1476     if (features & NETIF_F_HW_VLAN_CTAG_RX)
1477         cp->cpcmd |= RxVlanOn;
1478     else
1479         cp->cpcmd &= ~RxVlanOn;
1480 
1481     cpw16_f(CpCmd, cp->cpcmd);
1482     spin_unlock_irqrestore(&cp->lock, flags);
1483 
1484     return 0;
1485 }
1486 
1487 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1488                 void *p)
1489 {
1490     struct cp_private *cp = netdev_priv(dev);
1491     unsigned long flags;
1492 
1493     if (regs->len < CP_REGS_SIZE)
1494         return /* -EINVAL */;
1495 
1496     regs->version = CP_REGS_VER;
1497 
1498     spin_lock_irqsave(&cp->lock, flags);
1499     memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1500     spin_unlock_irqrestore(&cp->lock, flags);
1501 }
1502 
1503 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1504 {
1505     struct cp_private *cp = netdev_priv(dev);
1506     unsigned long flags;
1507 
1508     spin_lock_irqsave (&cp->lock, flags);
1509     netdev_get_wol (cp, wol);
1510     spin_unlock_irqrestore (&cp->lock, flags);
1511 }
1512 
1513 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1514 {
1515     struct cp_private *cp = netdev_priv(dev);
1516     unsigned long flags;
1517     int rc;
1518 
1519     spin_lock_irqsave (&cp->lock, flags);
1520     rc = netdev_set_wol (cp, wol);
1521     spin_unlock_irqrestore (&cp->lock, flags);
1522 
1523     return rc;
1524 }
1525 
1526 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1527 {
1528     switch (stringset) {
1529     case ETH_SS_STATS:
1530         memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1531         break;
1532     default:
1533         BUG();
1534         break;
1535     }
1536 }
1537 
1538 static void cp_get_ethtool_stats (struct net_device *dev,
1539                   struct ethtool_stats *estats, u64 *tmp_stats)
1540 {
1541     struct cp_private *cp = netdev_priv(dev);
1542     struct cp_dma_stats *nic_stats;
1543     dma_addr_t dma;
1544     int i;
1545 
1546     nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1547                        &dma, GFP_KERNEL);
1548     if (!nic_stats)
1549         return;
1550 
1551     /* begin NIC statistics dump */
1552     cpw32(StatsAddr + 4, (u64)dma >> 32);
1553     cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1554     cpr32(StatsAddr);
1555 
1556     for (i = 0; i < 1000; i++) {
1557         if ((cpr32(StatsAddr) & DumpStats) == 0)
1558             break;
1559         udelay(10);
1560     }
1561     cpw32(StatsAddr, 0);
1562     cpw32(StatsAddr + 4, 0);
1563     cpr32(StatsAddr);
1564 
1565     i = 0;
1566     tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1567     tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1568     tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1569     tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1570     tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1571     tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1572     tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1573     tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1574     tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1575     tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1576     tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1577     tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1578     tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1579     tmp_stats[i++] = cp->cp_stats.rx_frags;
1580     BUG_ON(i != CP_NUM_STATS);
1581 
1582     dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1583 }
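
/*
 * Annotation: the dump handshake writes the 64-bit DMA address of the
 * coherent buffer into the StatsAddr register pair (high word first)
 * with the DumpStats command bit OR-ed into the low word; the chip is
 * expected to clear DumpStats once the dump has landed in the buffer.
 * The loop therefore polls for up to 1000 * 10us = 10ms and proceeds
 * regardless, and the stray-looking cpr32(StatsAddr) reads simply
 * flush the posted writes.
 */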
1584 
1585 static const struct ethtool_ops cp_ethtool_ops = {
1586     .get_drvinfo        = cp_get_drvinfo,
1587     .get_regs_len       = cp_get_regs_len,
1588     .get_sset_count     = cp_get_sset_count,
1589     .nway_reset     = cp_nway_reset,
1590     .get_link       = ethtool_op_get_link,
1591     .get_msglevel       = cp_get_msglevel,
1592     .set_msglevel       = cp_set_msglevel,
1593     .get_regs       = cp_get_regs,
1594     .get_wol        = cp_get_wol,
1595     .set_wol        = cp_set_wol,
1596     .get_strings        = cp_get_strings,
1597     .get_ethtool_stats  = cp_get_ethtool_stats,
1598     .get_eeprom_len     = cp_get_eeprom_len,
1599     .get_eeprom     = cp_get_eeprom,
1600     .set_eeprom     = cp_set_eeprom,
1601     .get_ringparam      = cp_get_ringparam,
1602     .get_link_ksettings = cp_get_link_ksettings,
1603     .set_link_ksettings = cp_set_link_ksettings,
1604 };
1605 
1606 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1607 {
1608     struct cp_private *cp = netdev_priv(dev);
1609     int rc;
1610     unsigned long flags;
1611 
1612     if (!netif_running(dev))
1613         return -EINVAL;
1614 
1615     spin_lock_irqsave(&cp->lock, flags);
1616     rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1617     spin_unlock_irqrestore(&cp->lock, flags);
1618     return rc;
1619 }
1620 
1621 static int cp_set_mac_address(struct net_device *dev, void *p)
1622 {
1623     struct cp_private *cp = netdev_priv(dev);
1624     struct sockaddr *addr = p;
1625 
1626     if (!is_valid_ether_addr(addr->sa_data))
1627         return -EADDRNOTAVAIL;
1628 
1629     eth_hw_addr_set(dev, addr->sa_data);
1630 
1631     spin_lock_irq(&cp->lock);
1632 
1633     cpw8_f(Cfg9346, Cfg9346_Unlock);
1634     cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1635     cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1636     cpw8_f(Cfg9346, Cfg9346_Lock);
1637 
1638     spin_unlock_irq(&cp->lock);
1639 
1640     return 0;
1641 }
1642 
1643 /* Serial EEPROM section. */
1644 
1645 /*  EEPROM_Ctrl bits. */
1646 #define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1647 #define EE_CS           0x08    /* EEPROM chip select. */
1648 #define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1649 #define EE_WRITE_0      0x00
1650 #define EE_WRITE_1      0x02
1651 #define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1652 #define EE_ENB          (0x80 | EE_CS)
1653 
1654 /* Delay between EEPROM clock transitions.
1655    No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1656  */
1657 
1658 #define eeprom_delay()  readb(ee_addr)
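
/*
 * Annotation: using a readb() of the EEPROM control register as the
 * delay does double duty -- it forces any posted MMIO write out to
 * the device, and the read cycle itself is slow enough to pace the
 * bit-banged clock on 33 MHz PCI, per the comment above.
 */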
1659 
1660 /* The EEPROM commands include the always-set leading bit (worked example below). */
1661 #define EE_EXTEND_CMD   (4)
1662 #define EE_WRITE_CMD    (5)
1663 #define EE_READ_CMD     (6)
1664 #define EE_ERASE_CMD    (7)
1665 
1666 #define EE_EWDS_ADDR    (0)
1667 #define EE_WRAL_ADDR    (1)
1668 #define EE_ERAL_ADDR    (2)
1669 #define EE_EWEN_ADDR    (3)
1670 
1671 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1672 
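/*
 * Annotation: a worked example of the framing used by the helpers
 * below, assuming a 93C46-style part (addr_len = 6).  Reading word 5:
 *
 *     read_cmd = 5 | (EE_READ_CMD << 6);      // 0b1 10 000101
 *
 * eeprom_cmd() then shifts those 3 + 6 = 9 bits out MSB-first: the
 * always-set start bit, the 10 (read) opcode, and the 6-bit address.
 */
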
1673 static void eeprom_cmd_start(void __iomem *ee_addr)
1674 {
1675     writeb (EE_ENB & ~EE_CS, ee_addr);
1676     writeb (EE_ENB, ee_addr);
1677     eeprom_delay ();
1678 }
1679 
1680 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1681 {
1682     int i;
1683 
1684     /* Shift the command bits out. */
1685     for (i = cmd_len - 1; i >= 0; i--) {
1686         int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1687         writeb (EE_ENB | dataval, ee_addr);
1688         eeprom_delay ();
1689         writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1690         eeprom_delay ();
1691     }
1692     writeb (EE_ENB, ee_addr);
1693     eeprom_delay ();
1694 }
1695 
1696 static void eeprom_cmd_end(void __iomem *ee_addr)
1697 {
1698     writeb(0, ee_addr);
1699     eeprom_delay ();
1700 }
1701 
1702 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1703                   int addr_len)
1704 {
1705     int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1706 
1707     eeprom_cmd_start(ee_addr);
1708     eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1709     eeprom_cmd_end(ee_addr);
1710 }
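
/*
 * Annotation: "extended" commands reuse opcode 00 (EE_EXTEND_CMD is
 * 0b100, i.e. start bit plus 00) and encode the sub-operation in the
 * top two address bits.  With addr_len = 6, write-enable works out as
 *
 *     cmd = (4 << 6) | (EE_EWEN_ADDR << 4);   // 0b1 00 11 0000
 *
 * which is the standard 93Cxx EWEN frame; EE_EWDS_ADDR = 0 yields the
 * matching write-disable (EWDS) frame.
 */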
1711 
1712 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1713 {
1714     int i;
1715     u16 retval = 0;
1716     void __iomem *ee_addr = ioaddr + Cfg9346;
1717     int read_cmd = location | (EE_READ_CMD << addr_len);
1718 
1719     eeprom_cmd_start(ee_addr);
1720     eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1721 
1722     for (i = 16; i > 0; i--) {
1723         writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1724         eeprom_delay ();
1725         retval = (retval << 1) |
1726                  ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
1728         writeb (EE_ENB, ee_addr);
1729         eeprom_delay ();
1730     }
1731 
1732     eeprom_cmd_end(ee_addr);
1733 
1734     return retval;
1735 }
1736 
1737 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1738              int addr_len)
1739 {
1740     int i;
1741     void __iomem *ee_addr = ioaddr + Cfg9346;
1742     int write_cmd = location | (EE_WRITE_CMD << addr_len);
1743 
1744     eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1745 
1746     eeprom_cmd_start(ee_addr);
1747     eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1748     eeprom_cmd(ee_addr, val, 16);
1749     eeprom_cmd_end(ee_addr);
1750 
1751     eeprom_cmd_start(ee_addr);
1752     for (i = 0; i < 20000; i++)
1753         if (readb(ee_addr) & EE_DATA_READ)
1754             break;
1755     eeprom_cmd_end(ee_addr);
1756 
1757     eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1758 }
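
/*
 * Annotation: after the data frame, re-selecting the chip makes it
 * present its ready/busy status on the data-out pin, so the bounded
 * loop above (at most 20000 reads) waits for EE_DATA_READ to go high,
 * which on 93Cxx parts signals that the internal write cycle has
 * completed.  The EWEN/EWDS bracket leaves the part write-protected
 * whenever this function is not running.
 */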
1759 
1760 static int cp_get_eeprom_len(struct net_device *dev)
1761 {
1762     struct cp_private *cp = netdev_priv(dev);
1763     int size;
1764 
1765     spin_lock_irq(&cp->lock);
1766     size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1767     spin_unlock_irq(&cp->lock);
1768 
1769     return size;
1770 }
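
/*
 * Annotation: this probe appears to follow the same RealTek
 * convention used at init time below -- word 0 reading back as 0x8129
 * marks the larger part addressed with 8 bits (256 bytes), anything
 * else a 93C46 addressed with 6 bits (128 bytes).
 */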
1771 
1772 static int cp_get_eeprom(struct net_device *dev,
1773              struct ethtool_eeprom *eeprom, u8 *data)
1774 {
1775     struct cp_private *cp = netdev_priv(dev);
1776     unsigned int addr_len;
1777     u16 val;
1778     u32 offset = eeprom->offset >> 1;
1779     u32 len = eeprom->len;
1780     u32 i = 0;
1781 
1782     eeprom->magic = CP_EEPROM_MAGIC;
1783 
1784     spin_lock_irq(&cp->lock);
1785 
1786     addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1787 
1788     if (eeprom->offset & 1) {
1789         val = read_eeprom(cp->regs, offset, addr_len);
1790         data[i++] = (u8)(val >> 8);
1791         offset++;
1792     }
1793 
1794     while (i < len - 1) {
1795         val = read_eeprom(cp->regs, offset, addr_len);
1796         data[i++] = (u8)val;
1797         data[i++] = (u8)(val >> 8);
1798         offset++;
1799     }
1800 
1801     if (i < len) {
1802         val = read_eeprom(cp->regs, offset, addr_len);
1803         data[i] = (u8)val;
1804     }
1805 
1806     spin_unlock_irq(&cp->lock);
1807     return 0;
1808 }
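
/*
 * Annotation: the EEPROM is word-addressed and little-endian with
 * respect to ethtool's byte offsets, i.e. byte 2n is the low half of
 * word n and byte 2n+1 the high half.  For example, offset = 1 and
 * len = 3 emits the high byte of word 0 followed by both bytes of
 * word 1 -- exactly the head/body/tail split coded above, mirrored by
 * the read-modify-write edge handling in cp_set_eeprom() below.
 */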
1809 
1810 static int cp_set_eeprom(struct net_device *dev,
1811              struct ethtool_eeprom *eeprom, u8 *data)
1812 {
1813     struct cp_private *cp = netdev_priv(dev);
1814     unsigned int addr_len;
1815     u16 val;
1816     u32 offset = eeprom->offset >> 1;
1817     u32 len = eeprom->len;
1818     u32 i = 0;
1819 
1820     if (eeprom->magic != CP_EEPROM_MAGIC)
1821         return -EINVAL;
1822 
1823     spin_lock_irq(&cp->lock);
1824 
1825     addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1826 
1827     if (eeprom->offset & 1) {
1828         val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1829         val |= (u16)data[i++] << 8;
1830         write_eeprom(cp->regs, offset, val, addr_len);
1831         offset++;
1832     }
1833 
1834     while (i < len - 1) {
1835         val = (u16)data[i++];
1836         val |= (u16)data[i++] << 8;
1837         write_eeprom(cp->regs, offset, val, addr_len);
1838         offset++;
1839     }
1840 
1841     if (i < len) {
1842         val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1843         val |= (u16)data[i];
1844         write_eeprom(cp->regs, offset, val, addr_len);
1845     }
1846 
1847     spin_unlock_irq(&cp->lock);
1848     return 0;
1849 }
1850 
1851 /* Put the board into a low-power D3 state (D3hot here) and wait for a WakeUp signal */
1852 static void cp_set_d3_state (struct cp_private *cp)
1853 {
1854     pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1855     pci_set_power_state (cp->pdev, PCI_D3hot);
1856 }
1857 
1858 static netdev_features_t cp_features_check(struct sk_buff *skb,
1859                        struct net_device *dev,
1860                        netdev_features_t features)
1861 {
1862     if (skb_shinfo(skb)->gso_size > MSSMask)
1863         features &= ~NETIF_F_TSO;
1864 
1865     return vlan_features_check(skb, features);
1866 }
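
/*
 * Annotation: the C+ Tx descriptor stores the TSO MSS in a field only
 * MSSMask wide, so a GSO packet whose gso_size will not fit cannot be
 * described to the hardware; dropping NETIF_F_TSO for that one skb
 * makes the core fall back to software segmentation rather than
 * truncate the value in the descriptor.
 */
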
1867 static const struct net_device_ops cp_netdev_ops = {
1868     .ndo_open       = cp_open,
1869     .ndo_stop       = cp_close,
1870     .ndo_validate_addr  = eth_validate_addr,
1871     .ndo_set_mac_address    = cp_set_mac_address,
1872     .ndo_set_rx_mode    = cp_set_rx_mode,
1873     .ndo_get_stats      = cp_get_stats,
1874     .ndo_eth_ioctl      = cp_ioctl,
1875     .ndo_start_xmit     = cp_start_xmit,
1876     .ndo_tx_timeout     = cp_tx_timeout,
1877     .ndo_set_features   = cp_set_features,
1878     .ndo_change_mtu     = cp_change_mtu,
1879     .ndo_features_check = cp_features_check,
1880 
1881 #ifdef CONFIG_NET_POLL_CONTROLLER
1882     .ndo_poll_controller    = cp_poll_controller,
1883 #endif
1884 };
1885 
1886 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1887 {
1888     struct net_device *dev;
1889     struct cp_private *cp;
1890     int rc;
1891     void __iomem *regs;
1892     resource_size_t pciaddr;
1893     unsigned int addr_len, i, pci_using_dac;
1894     __le16 addr[ETH_ALEN / 2];
1895 
1896     pr_info_once("%s", version);
1897 
1898     if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1899         pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1900         dev_info(&pdev->dev,
1901              "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1902              pdev->vendor, pdev->device, pdev->revision);
1903         return -ENODEV;
1904     }
1905 
1906     dev = alloc_etherdev(sizeof(struct cp_private));
1907     if (!dev)
1908         return -ENOMEM;
1909     SET_NETDEV_DEV(dev, &pdev->dev);
1910 
1911     cp = netdev_priv(dev);
1912     cp->pdev = pdev;
1913     cp->dev = dev;
1914     cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1915     spin_lock_init (&cp->lock);
1916     cp->mii_if.dev = dev;
1917     cp->mii_if.mdio_read = mdio_read;
1918     cp->mii_if.mdio_write = mdio_write;
1919     cp->mii_if.phy_id = CP_INTERNAL_PHY;
1920     cp->mii_if.phy_id_mask = 0x1f;
1921     cp->mii_if.reg_num_mask = 0x1f;
1922     cp_set_rxbufsize(cp);
1923 
1924     rc = pci_enable_device(pdev);
1925     if (rc)
1926         goto err_out_free;
1927 
1928     rc = pci_set_mwi(pdev);
1929     if (rc)
1930         goto err_out_disable;
1931 
1932     rc = pci_request_regions(pdev, DRV_NAME);
1933     if (rc)
1934         goto err_out_mwi;
1935 
1936     pciaddr = pci_resource_start(pdev, 1);
1937     if (!pciaddr) {
1938         rc = -EIO;
1939         dev_err(&pdev->dev, "no MMIO resource\n");
1940         goto err_out_res;
1941     }
1942     if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1943         rc = -EIO;
1944         dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1945                (unsigned long long)pci_resource_len(pdev, 1));
1946         goto err_out_res;
1947     }
1948 
1949     /* Configure DMA attributes. */
1950     if ((sizeof(dma_addr_t) > 4) &&
1951         !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1952         pci_using_dac = 1;
1953     } else {
1954         pci_using_dac = 0;
1955 
1956         rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1957         if (rc) {
1958             dev_err(&pdev->dev,
1959                 "No usable DMA configuration, aborting\n");
1960             goto err_out_res;
1961         }
1962     }
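
    /*
     * Annotation: a 64-bit mask is attempted only when dma_addr_t is
     * itself wider than 32 bits (a wide mask buys nothing otherwise);
     * the outcome is recorded in pci_using_dac so that PCIDAC and
     * NETIF_F_HIGHDMA can be set below, with the mandatory 32-bit
     * mask as the fallback -- if even that fails, the probe aborts.
     */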
1963 
1964     cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1965             PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1966 
1967     dev->features |= NETIF_F_RXCSUM;
1968     dev->hw_features |= NETIF_F_RXCSUM;
1969 
1970     regs = ioremap(pciaddr, CP_REGS_SIZE);
1971     if (!regs) {
1972         rc = -EIO;
1973         dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1974             (unsigned long long)pci_resource_len(pdev, 1),
1975                (unsigned long long)pciaddr);
1976         goto err_out_res;
1977     }
1978     cp->regs = regs;
1979 
1980     cp_stop_hw(cp);
1981 
1982     /* read MAC address from EEPROM */
1983     addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1984     for (i = 0; i < 3; i++)
1985         addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1986     eth_hw_addr_set(dev, (u8 *)addr);
1987 
1988     dev->netdev_ops = &cp_netdev_ops;
1989     netif_napi_add_weight(dev, &cp->napi, cp_rx_poll, 16);
1990     dev->ethtool_ops = &cp_ethtool_ops;
1991     dev->watchdog_timeo = TX_TIMEOUT;
1992 
1993     dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1994         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1995 
1996     if (pci_using_dac)
1997         dev->features |= NETIF_F_HIGHDMA;
1998 
1999     dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2000         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2001     dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2002         NETIF_F_HIGHDMA;
2003 
2004     /* MTU range: 60 - 4096 */
2005     dev->min_mtu = CP_MIN_MTU;
2006     dev->max_mtu = CP_MAX_MTU;
2007 
2008     rc = register_netdev(dev);
2009     if (rc)
2010         goto err_out_iomap;
2011 
2012     netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2013             regs, dev->dev_addr, pdev->irq);
2014 
2015     pci_set_drvdata(pdev, dev);
2016 
2017     /* enable busmastering and memory-write-invalidate */
2018     pci_set_master(pdev);
2019 
2020     if (cp->wol_enabled)
2021         cp_set_d3_state (cp);
2022 
2023     return 0;
2024 
2025 err_out_iomap:
2026     iounmap(regs);
2027 err_out_res:
2028     pci_release_regions(pdev);
2029 err_out_mwi:
2030     pci_clear_mwi(pdev);
2031 err_out_disable:
2032     pci_disable_device(pdev);
2033 err_out_free:
2034     free_netdev(dev);
2035     return rc;
2036 }
2037 
2038 static void cp_remove_one (struct pci_dev *pdev)
2039 {
2040     struct net_device *dev = pci_get_drvdata(pdev);
2041     struct cp_private *cp = netdev_priv(dev);
2042 
2043     unregister_netdev(dev);
2044     iounmap(cp->regs);
2045     if (cp->wol_enabled)
2046         pci_set_power_state (pdev, PCI_D0);
2047     pci_release_regions(pdev);
2048     pci_clear_mwi(pdev);
2049     pci_disable_device(pdev);
2050     free_netdev(dev);
2051 }
2052 
2053 static int __maybe_unused cp_suspend(struct device *device)
2054 {
2055     struct net_device *dev = dev_get_drvdata(device);
2056     struct cp_private *cp = netdev_priv(dev);
2057     unsigned long flags;
2058 
2059     if (!netif_running(dev))
2060         return 0;
2061 
2062     netif_device_detach (dev);
2063     netif_stop_queue (dev);
2064 
2065     spin_lock_irqsave (&cp->lock, flags);
2066 
2067     /* Disable Rx and Tx */
2068     cpw16 (IntrMask, 0);
2069     cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2070 
2071     spin_unlock_irqrestore (&cp->lock, flags);
2072 
2073     device_set_wakeup_enable(device, cp->wol_enabled);
2074 
2075     return 0;
2076 }
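
/*
 * Annotation: ordering matters in cp_suspend() -- the device is
 * detached and the queue stopped before interrupts are masked and
 * RxOn/TxOn are cleared, so no new transmit can race the register
 * writes; cp_resume() below undoes the steps in roughly reverse
 * order after resetting the ring indices and re-initializing the
 * hardware.
 */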
2077 
2078 static int __maybe_unused cp_resume(struct device *device)
2079 {
2080     struct net_device *dev = dev_get_drvdata(device);
2081     struct cp_private *cp = netdev_priv(dev);
2082     unsigned long flags;
2083 
2084     if (!netif_running(dev))
2085         return 0;
2086 
2087     netif_device_attach (dev);
2088 
2089     /* FIXME: trouble may occur if the Rx ring buffer is depleted */
2090     cp_init_rings_index (cp);
2091     cp_init_hw (cp);
2092     cp_enable_irq(cp);
2093     netif_start_queue (dev);
2094 
2095     spin_lock_irqsave (&cp->lock, flags);
2096 
2097     mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2098 
2099     spin_unlock_irqrestore (&cp->lock, flags);
2100 
2101     return 0;
2102 }
2103 
2104 static const struct pci_device_id cp_pci_tbl[] = {
2105         { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
2106         { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
2107         { },
2108 };
2109 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2110 
2111 static SIMPLE_DEV_PM_OPS(cp_pm_ops, cp_suspend, cp_resume);
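
/*
 * Annotation: SIMPLE_DEV_PM_OPS() expands to a const struct dev_pm_ops
 * whose system-sleep callbacks (suspend/resume plus the hibernation
 * freeze/thaw/poweroff/restore hooks) all point at cp_suspend() and
 * cp_resume(); the __maybe_unused markers on those functions keep the
 * build warning-free when CONFIG_PM_SLEEP is disabled and the ops end
 * up unreferenced.
 */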
2112 
2113 static struct pci_driver cp_driver = {
2114     .name         = DRV_NAME,
2115     .id_table     = cp_pci_tbl,
2116     .probe        = cp_init_one,
2117     .remove       = cp_remove_one,
2118     .driver.pm    = &cp_pm_ops,
2119 };
2120 
2121 module_pci_driver(cp_driver);