Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*******************************************************************************
0003 
0004   Copyright(c) 2006 Tundra Semiconductor Corporation.
0005 
0006 
0007 *******************************************************************************/
0008 
0009 /* This driver is based on the driver code originally developed
0010  * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
0011  * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
0012  *
0013  * Currently changes from original version are:
0014  * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
0015  * - modifications to handle two ports independently and support for
0016  *   additional PHY devices (alexandre.bounine@tundra.com)
0017  * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
0018  *
0019  */
0020 
0021 #include <linux/module.h>
0022 #include <linux/types.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/net.h>
0025 #include <linux/netdevice.h>
0026 #include <linux/etherdevice.h>
0027 #include <linux/ethtool.h>
0028 #include <linux/skbuff.h>
0029 #include <linux/spinlock.h>
0030 #include <linux/delay.h>
0031 #include <linux/crc32.h>
0032 #include <linux/mii.h>
0033 #include <linux/device.h>
0034 #include <linux/pci.h>
0035 #include <linux/rtnetlink.h>
0036 #include <linux/timer.h>
0037 #include <linux/platform_device.h>
0038 #include <linux/gfp.h>
0039 
0040 #include <asm/io.h>
0041 #include <asm/tsi108.h>
0042 
0043 #include "tsi108_eth.h"
0044 
0045 #define MII_READ_DELAY 10000    /* max link wait time in msec */
0046 
0047 #define TSI108_RXRING_LEN     256
0048 
0049 /* NOTE: The driver currently does not support receiving packets
0050  * larger than the buffer size, so don't decrease this (unless you
0051  * want to add such support).
0052  */
0053 #define TSI108_RXBUF_SIZE     1536
0054 
0055 #define TSI108_TXRING_LEN     256
0056 
0057 #define TSI108_TX_INT_FREQ    64
0058 
0059 /* Check the phy status every half a second. */
0060 #define CHECK_PHY_INTERVAL (HZ/2)
0061 
0062 static int tsi108_init_one(struct platform_device *pdev);
0063 static int tsi108_ether_remove(struct platform_device *pdev);
0064 
/* Per-port driver state.  One instance lives in the netdev private
 * area of each of the two Tsi108 GIGE ports.
 */
struct tsi108_prv_data {
    void  __iomem *regs;    /* Base of normal regs */
    void  __iomem *phyregs; /* Base of register bank used for PHY access */

    struct net_device *dev; /* Back-pointer to the owning net device */
    struct napi_struct napi;

    unsigned int phy;       /* Index of PHY for this interface */
    unsigned int irq_num;   /* Interrupt line for this port */
    unsigned int id;        /* Port index */
    unsigned int phy_type;  /* PHY model identifier from the platform data */

    struct timer_list timer;/* Timer that triggers the check phy function */
    unsigned int rxtail;    /* Next entry in rxring to read */
    unsigned int rxhead;    /* Next entry in rxring to give a new buffer */
    unsigned int rxfree;    /* Number of free, allocated RX buffers */

    unsigned int rxpending; /* Non-zero if there are still descriptors
                 * to be processed from a previous descriptor
                 * interrupt condition that has been cleared */

    unsigned int txtail;    /* Next TX descriptor to check status on */
    unsigned int txhead;    /* Next TX descriptor to use */

    /* Number of free TX descriptors.  This could be calculated from
     * txhead and txtail if one descriptor were left unused to disambiguate
     * full and empty conditions, but it's simpler to just keep track
     * explicitly. */

    unsigned int txfree;

    unsigned int phy_ok;        /* The PHY is currently powered on. */

    /* PHY status (duplex is 1 for half, 2 for full,
     * so that the default 0 indicates that neither has
     * yet been configured). */

    unsigned int link_up;
    unsigned int speed;
    unsigned int duplex;

    tx_desc *txring;            /* TX descriptor ring (DMA-coherent) */
    rx_desc *rxring;            /* RX descriptor ring (DMA-coherent) */
    struct sk_buff *txskbs[TSI108_TXRING_LEN];  /* skb per TX descriptor */
    struct sk_buff *rxskbs[TSI108_RXRING_LEN];  /* skb per RX descriptor */

    dma_addr_t txdma, rxdma;    /* Bus addresses of the two rings */

    /* txlock nests in misclock and phy_lock */

    spinlock_t txlock, misclock;

    /* stats is used to hold the upper bits of each hardware counter,
     * and tmpstats is used to hold the full values for returning
     * to the caller of get_stats().  They must be separate in case
     * an overflow interrupt occurs before the stats are consumed.
     */

    struct net_device_stats stats;
    struct net_device_stats tmpstats;

    /* These stats are kept separate in hardware, thus require individual
     * fields for handling carry.  They are combined in get_stats.
     */

    unsigned long rx_fcs;   /* Add to rx_frame_errors */
    unsigned long rx_short_fcs; /* Add to rx_frame_errors */
    unsigned long rx_long_fcs;  /* Add to rx_frame_errors */
    unsigned long rx_underruns; /* Add to rx_length_errors */
    unsigned long rx_overruns;  /* Add to rx_length_errors */

    unsigned long tx_coll_abort;    /* Add to tx_aborted_errors/collisions */
    unsigned long tx_pause_drop;    /* Add to tx_aborted_errors */

    unsigned long mc_hash[16];  /* Shadow of the multicast hash filter */
    u32 msg_enable;         /* debug message level */
    struct mii_if_info mii_if;  /* Generic MII glue (uses mdio_read/write below) */
    unsigned int init_media;    /* Force first media check to report state */

    struct platform_device *pdev;   /* Platform device, used for DMA mapping */
};
0146 
/* Platform driver glue: binds the "tsi-ethernet" platform devices
 * registered by the board code to this driver's probe/remove hooks.
 */

static struct platform_driver tsi_eth_driver = {
    .probe = tsi108_init_one,
    .remove = tsi108_ether_remove,
    .driver = {
        .name = "tsi-ethernet",
    },
};
0156 
0157 static void tsi108_timed_checker(struct timer_list *t);
0158 
0159 #ifdef DEBUG
/* Debug-only helper: dump interrupt state, PHY/link status, and the
 * software and hardware view of both DMA rings for one interface.
 */
static void dump_eth_one(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);

    printk("Dumping %s...\n", dev->name);
    printk("intstat %x intmask %x phy_ok %d"
           " link %d speed %d duplex %d\n",
           TSI_READ(TSI108_EC_INTSTAT),
           TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
           data->link_up, data->speed, data->duplex);

    printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
           data->txhead, data->txtail, data->txfree,
           TSI_READ(TSI108_EC_TXSTAT),
           TSI_READ(TSI108_EC_TXESTAT),
           TSI_READ(TSI108_EC_TXERR));

    printk("RX: head %d, tail %d, free %d, stat %x,"
           " estat %x, err %x, pending %d\n\n",
           data->rxhead, data->rxtail, data->rxfree,
           TSI_READ(TSI108_EC_RXSTAT),
           TSI_READ(TSI108_EC_RXESTAT),
           TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
0184 #endif
0185 
0186 /* Synchronization is needed between the thread and up/down events.
0187  * Note that the PHY is accessed through the same registers for both
0188  * interfaces, so this can't be made interface-specific.
0189  */
0190 
0191 static DEFINE_SPINLOCK(phy_lock);
0192 
0193 static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
0194 {
0195     unsigned i;
0196 
0197     TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
0198                 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
0199                 (reg << TSI108_MAC_MII_ADDR_REG));
0200     TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
0201     TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
0202     for (i = 0; i < 100; i++) {
0203         if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
0204               (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
0205             break;
0206         udelay(10);
0207     }
0208 
0209     if (i == 100)
0210         return 0xffff;
0211     else
0212         return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
0213 }
0214 
0215 static void tsi108_write_mii(struct tsi108_prv_data *data,
0216                 int reg, u16 val)
0217 {
0218     unsigned i = 100;
0219     TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
0220                 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
0221                 (reg << TSI108_MAC_MII_ADDR_REG));
0222     TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
0223     while (i--) {
0224         if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
0225             TSI108_MAC_MII_IND_BUSY))
0226             break;
0227         udelay(10);
0228     }
0229 }
0230 
/* mii_if_info read hook; the PHY address is fixed per interface, so
 * @addr is ignored and the configured PHY is always used.
 */
static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
{
    return tsi108_read_mii(netdev_priv(dev), reg);
}
0236 
/* mii_if_info write hook; as with the read hook, @addr is ignored in
 * favor of the interface's configured PHY.
 */
static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
    tsi108_write_mii(netdev_priv(dev), reg, val);
}
0242 
0243 static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
0244                     int reg, u16 val)
0245 {
0246     unsigned i = 1000;
0247     TSI_WRITE(TSI108_MAC_MII_ADDR,
0248                  (0x1e << TSI108_MAC_MII_ADDR_PHY)
0249                  | (reg << TSI108_MAC_MII_ADDR_REG));
0250     TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
0251     while(i--) {
0252         if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
0253             return;
0254         udelay(10);
0255     }
0256     printk(KERN_ERR "%s function time out\n", __func__);
0257 }
0258 
0259 static int mii_speed(struct mii_if_info *mii)
0260 {
0261     int advert, lpa, val, media;
0262     int lpa2 = 0;
0263     int speed;
0264 
0265     if (!mii_link_ok(mii))
0266         return 0;
0267 
0268     val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
0269     if ((val & BMSR_ANEGCOMPLETE) == 0)
0270         return 0;
0271 
0272     advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
0273     lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
0274     media = mii_nway_result(advert & lpa);
0275 
0276     if (mii->supports_gmii)
0277         lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
0278 
0279     speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
0280             (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
0281     return speed;
0282 }
0283 
/* Poll the PHY and resynchronize the MAC with the current link state:
 * reprogram the MAC/port registers when speed or duplex changed, and
 * start/stop the TX queue on link up/down transitions.  Called from
 * the periodic timer and from the open path; takes phy_lock because
 * both ports' PHYs are reached through the same management registers.
 */
static void tsi108_check_phy(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    u32 mac_cfg2_reg, portctrl_reg;
    u32 duplex;
    u32 speed;
    unsigned long flags;

    spin_lock_irqsave(&phy_lock, flags);

    if (!data->phy_ok)
        goto out;

    /* duplex is non-zero only when mii_check_media saw a change. */
    duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
    data->init_media = 0;

    if (netif_carrier_ok(dev)) {

        speed = mii_speed(&data->mii_if);

        if ((speed != data->speed) || duplex) {

            mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
            portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);

            mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;

            /* Select GMII vs MII interface mode to match the speed. */
            if (speed == 1000) {
                mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
                portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
            } else {
                mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
                portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
            }

            data->speed = speed;

            if (data->mii_if.full_duplex) {
                mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
                portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
                data->duplex = 2;
            } else {
                mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
                portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
                data->duplex = 1;
            }

            TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
            TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
        }

        if (data->link_up == 0) {
            /* The manual says it can take 3-4 usecs for the speed change
             * to take effect.
             */
            udelay(5);

            /* txlock nests inside phy_lock (see struct comment);
             * only wake the queue if there is actually ring space.
             */
            spin_lock(&data->txlock);
            if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
                netif_wake_queue(dev);

            data->link_up = 1;
            spin_unlock(&data->txlock);
        }
    } else {
        if (data->link_up == 1) {
            netif_stop_queue(dev);
            data->link_up = 0;
            printk(KERN_NOTICE "%s : link is down\n", dev->name);
        }

        goto out;
    }


out:
    spin_unlock_irqrestore(&phy_lock, flags);
}
0362 
/* Fold one hardware carry flag into the software-kept upper bits of a
 * statistics counter: when @carry_bit is set in @carry, the hardware
 * counter wrapped, so advance *@upper by the counter's modulus
 * (@carry_shift).
 */
static inline void
tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
              unsigned long *upper)
{
    if ((carry & carry_bit) != 0)
        *upper += carry_shift;
}
0370 
/* Statistics-carry interrupt service: read and acknowledge both carry
 * registers, then bump the software upper bits of every counter whose
 * hardware register wrapped.  Holds misclock so readers in
 * tsi108_get_stats() see a consistent counter/upper-bits pair.
 */
static void tsi108_stat_carry(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    unsigned long flags;
    u32 carry1, carry2;

    spin_lock_irqsave(&data->misclock, flags);

    carry1 = TSI_READ(TSI108_STAT_CARRY1);
    carry2 = TSI_READ(TSI108_STAT_CARRY2);

    /* Writing the flags back acknowledges (clears) them. */
    TSI_WRITE(TSI108_STAT_CARRY1, carry1);
    TSI_WRITE(TSI108_STAT_CARRY2, carry2);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
                  TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
                  TSI108_STAT_RXPKTS_CARRY,
                  &data->stats.rx_packets);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
                  TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
                  TSI108_STAT_RXMCAST_CARRY,
                  &data->stats.multicast);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
                  TSI108_STAT_RXALIGN_CARRY,
                  &data->stats.rx_frame_errors);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
                  TSI108_STAT_RXLENGTH_CARRY,
                  &data->stats.rx_length_errors);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
                  TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
                  TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
                  TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
                  TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);

    tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
                  TSI108_STAT_RXDROP_CARRY,
                  &data->stats.rx_missed_errors);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
                  TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
                  TSI108_STAT_TXPKTS_CARRY,
                  &data->stats.tx_packets);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
                  TSI108_STAT_TXEXDEF_CARRY,
                  &data->stats.tx_aborted_errors);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
                  TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
                  TSI108_STAT_TXTCOL_CARRY,
                  &data->stats.collisions);

    tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
                  TSI108_STAT_TXPAUSEDROP_CARRY,
                  &data->tx_pause_drop);

    spin_unlock_irqrestore(&data->misclock, flags);
}
0447 
0448 /* Read a stat counter atomically with respect to carries.
0449  * data->misclock must be held.
0450  */
0451 static inline unsigned long
0452 tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
0453          int carry_shift, unsigned long *upper)
0454 {
0455     int carryreg;
0456     unsigned long val;
0457 
0458     if (reg < 0xb0)
0459         carryreg = TSI108_STAT_CARRY1;
0460     else
0461         carryreg = TSI108_STAT_CARRY2;
0462 
0463       again:
0464     val = TSI_READ(reg) | *upper;
0465 
0466     /* Check to see if it overflowed, but the interrupt hasn't
0467      * been serviced yet.  If so, handle the carry here, and
0468      * try again.
0469      */
0470 
0471     if (unlikely(TSI_READ(carryreg) & carry_bit)) {
0472         *upper += carry_shift;
0473         TSI_WRITE(carryreg, carry_bit);
0474         goto again;
0475     }
0476 
0477     return val;
0478 }
0479 
/* ndo_get_stats hook: assemble full 64-ish-bit counter values from the
 * hardware registers plus the software-kept upper bits, into tmpstats
 * (kept separate from stats so a concurrent carry interrupt cannot
 * corrupt the values being returned).  Several hardware counters are
 * folded into single netdev fields, matching the carry handling in
 * tsi108_stat_carry().
 */
static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
{
    unsigned long excol;

    struct tsi108_prv_data *data = netdev_priv(dev);
    spin_lock_irq(&data->misclock);

    data->tmpstats.rx_packets =
        tsi108_read_stat(data, TSI108_STAT_RXPKTS,
                 TSI108_STAT_CARRY1_RXPKTS,
                 TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);

    data->tmpstats.tx_packets =
        tsi108_read_stat(data, TSI108_STAT_TXPKTS,
                 TSI108_STAT_CARRY2_TXPKTS,
                 TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);

    data->tmpstats.rx_bytes =
        tsi108_read_stat(data, TSI108_STAT_RXBYTES,
                 TSI108_STAT_CARRY1_RXBYTES,
                 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);

    data->tmpstats.tx_bytes =
        tsi108_read_stat(data, TSI108_STAT_TXBYTES,
                 TSI108_STAT_CARRY2_TXBYTES,
                 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);

    data->tmpstats.multicast =
        tsi108_read_stat(data, TSI108_STAT_RXMCAST,
                 TSI108_STAT_CARRY1_RXMCAST,
                 TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);

    /* Excessive collisions count toward both collisions and
     * tx_aborted_errors below.
     */
    excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
                 TSI108_STAT_CARRY2_TXEXCOL,
                 TSI108_STAT_TXEXCOL_CARRY,
                 &data->tx_coll_abort);

    data->tmpstats.collisions =
        tsi108_read_stat(data, TSI108_STAT_TXTCOL,
                 TSI108_STAT_CARRY2_TXTCOL,
                 TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);

    data->tmpstats.collisions += excol;

    data->tmpstats.rx_length_errors =
        tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
                 TSI108_STAT_CARRY1_RXLENGTH,
                 TSI108_STAT_RXLENGTH_CARRY,
                 &data->stats.rx_length_errors);

    data->tmpstats.rx_length_errors +=
        tsi108_read_stat(data, TSI108_STAT_RXRUNT,
                 TSI108_STAT_CARRY1_RXRUNT,
                 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);

    data->tmpstats.rx_length_errors +=
        tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
                 TSI108_STAT_CARRY1_RXJUMBO,
                 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);

    data->tmpstats.rx_frame_errors =
        tsi108_read_stat(data, TSI108_STAT_RXALIGN,
                 TSI108_STAT_CARRY1_RXALIGN,
                 TSI108_STAT_RXALIGN_CARRY,
                 &data->stats.rx_frame_errors);

    data->tmpstats.rx_frame_errors +=
        tsi108_read_stat(data, TSI108_STAT_RXFCS,
                 TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
                 &data->rx_fcs);

    data->tmpstats.rx_frame_errors +=
        tsi108_read_stat(data, TSI108_STAT_RXFRAG,
                 TSI108_STAT_CARRY1_RXFRAG,
                 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);

    data->tmpstats.rx_missed_errors =
        tsi108_read_stat(data, TSI108_STAT_RXDROP,
                 TSI108_STAT_CARRY1_RXDROP,
                 TSI108_STAT_RXDROP_CARRY,
                 &data->stats.rx_missed_errors);

    /* These three are maintained by software. */
    data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
    data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;

    data->tmpstats.tx_aborted_errors =
        tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
                 TSI108_STAT_CARRY2_TXEXDEF,
                 TSI108_STAT_TXEXDEF_CARRY,
                 &data->stats.tx_aborted_errors);

    data->tmpstats.tx_aborted_errors +=
        tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
                 TSI108_STAT_CARRY2_TXPAUSE,
                 TSI108_STAT_TXPAUSEDROP_CARRY,
                 &data->tx_pause_drop);

    data->tmpstats.tx_aborted_errors += excol;

    /* Derived totals expected by the netdev stats interface. */
    data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
    data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
        data->tmpstats.rx_crc_errors +
        data->tmpstats.rx_frame_errors +
        data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;

    spin_unlock_irq(&data->misclock);
    return &data->tmpstats;
}
0589 
0590 static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
0591 {
0592     TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
0593                  TSI108_EC_RXQ_PTRHIGH_VALID);
0594 
0595     TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
0596                  | TSI108_EC_RXCTRL_QUEUE0);
0597 }
0598 
0599 static void tsi108_restart_tx(struct tsi108_prv_data * data)
0600 {
0601     TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
0602                  TSI108_EC_TXQ_PTRHIGH_VALID);
0603 
0604     TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
0605                  TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
0606 }
0607 
/* txlock must be held by caller, with IRQs disabled, and
 * with permission to re-enable them when the lock is dropped.
 *
 * Reclaim completed TX descriptors: walk from txtail until a
 * descriptor still owned by hardware is found, freeing each skb at
 * its end-of-frame descriptor, and wake the queue if anything was
 * released.
 *
 * NOTE(review): the DMA mappings created in tsi108_send_packet() are
 * not unmapped here -- presumably this relies on cache-coherent DMA
 * on the Tsi108 platform; verify before reusing elsewhere.
 */
static void tsi108_complete_tx(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    int tx;
    struct sk_buff *skb;
    int release = 0;

    /* !txfree covers the completely-full ring, where head == tail. */
    while (!data->txfree || data->txhead != data->txtail) {
        tx = data->txtail;

        /* Still owned by the DMA engine: stop here. */
        if (data->txring[tx].misc & TSI108_TX_OWN)
            break;

        skb = data->txskbs[tx];

        if (!(data->txring[tx].misc & TSI108_TX_OK))
            printk("%s: bad tx packet, misc %x\n",
                   dev->name, data->txring[tx].misc);

        data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
        data->txfree++;

        /* The skb is shared by all of its fragments' descriptors;
         * free it only once, at the end-of-frame descriptor.
         */
        if (data->txring[tx].misc & TSI108_TX_EOF) {
            dev_kfree_skb_any(skb);
            release++;
        }
    }

    if (release) {
        if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
            netif_wake_queue(dev);
    }
}
0644 
0645 static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
0646 {
0647     struct tsi108_prv_data *data = netdev_priv(dev);
0648     int frags = skb_shinfo(skb)->nr_frags + 1;
0649     int i;
0650 
0651     if (!data->phy_ok && net_ratelimit())
0652         printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
0653 
0654     if (!data->link_up) {
0655         printk(KERN_ERR "%s: Transmit while link is down!\n",
0656                dev->name);
0657         netif_stop_queue(dev);
0658         return NETDEV_TX_BUSY;
0659     }
0660 
0661     if (data->txfree < MAX_SKB_FRAGS + 1) {
0662         netif_stop_queue(dev);
0663 
0664         if (net_ratelimit())
0665             printk(KERN_ERR "%s: Transmit with full tx ring!\n",
0666                    dev->name);
0667         return NETDEV_TX_BUSY;
0668     }
0669 
0670     if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
0671         netif_stop_queue(dev);
0672     }
0673 
0674     spin_lock_irq(&data->txlock);
0675 
0676     for (i = 0; i < frags; i++) {
0677         int misc = 0;
0678         int tx = data->txhead;
0679 
0680         /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
0681          * the interrupt bit.  TX descriptor-complete interrupts are
0682          * enabled when the queue fills up, and masked when there is
0683          * still free space.  This way, when saturating the outbound
0684          * link, the tx interrupts are kept to a reasonable level.
0685          * When the queue is not full, reclamation of skbs still occurs
0686          * as new packets are transmitted, or on a queue-empty
0687          * interrupt.
0688          */
0689 
0690         if ((tx % TSI108_TX_INT_FREQ == 0) &&
0691             ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
0692             misc = TSI108_TX_INT;
0693 
0694         data->txskbs[tx] = skb;
0695 
0696         if (i == 0) {
0697             data->txring[tx].buf0 = dma_map_single(&data->pdev->dev,
0698                     skb->data, skb_headlen(skb),
0699                     DMA_TO_DEVICE);
0700             data->txring[tx].len = skb_headlen(skb);
0701             misc |= TSI108_TX_SOF;
0702         } else {
0703             const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
0704 
0705             data->txring[tx].buf0 =
0706                 skb_frag_dma_map(&data->pdev->dev, frag,
0707                         0, skb_frag_size(frag),
0708                         DMA_TO_DEVICE);
0709             data->txring[tx].len = skb_frag_size(frag);
0710         }
0711 
0712         if (i == frags - 1)
0713             misc |= TSI108_TX_EOF;
0714 
0715         if (netif_msg_pktdata(data)) {
0716             int i;
0717             printk("%s: Tx Frame contents (%d)\n", dev->name,
0718                    skb->len);
0719             for (i = 0; i < skb->len; i++)
0720                 printk(" %2.2x", skb->data[i]);
0721             printk(".\n");
0722         }
0723         data->txring[tx].misc = misc | TSI108_TX_OWN;
0724 
0725         data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
0726         data->txfree--;
0727     }
0728 
0729     tsi108_complete_tx(dev);
0730 
0731     /* This must be done after the check for completed tx descriptors,
0732      * so that the tail pointer is correct.
0733      */
0734 
0735     if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
0736         tsi108_restart_tx(data);
0737 
0738     spin_unlock_irq(&data->txlock);
0739     return NETDEV_TX_OK;
0740 }
0741 
/* Consume up to @budget completed RX descriptors from the tail of the
 * ring, passing good frames up the stack and counting errored ones.
 * Returns the number of descriptors processed.  Called from NAPI poll
 * context.
 *
 * NOTE(review): the buffer mapped in tsi108_refill_rx() is not
 * unmapped/synced before skb->data is read here -- presumably relying
 * on cache-coherent DMA on this platform; verify before reuse.
 */
static int tsi108_complete_rx(struct net_device *dev, int budget)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    int done = 0;

    while (data->rxfree && done != budget) {
        int rx = data->rxtail;
        struct sk_buff *skb;

        /* Still owned by the DMA engine: nothing more to reap. */
        if (data->rxring[rx].misc & TSI108_RX_OWN)
            break;

        skb = data->rxskbs[rx];
        data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
        data->rxfree--;
        done++;

        if (data->rxring[rx].misc & TSI108_RX_BAD) {
            /* misclock guards the software-maintained error counters
             * against the stats-carry interrupt.
             */
            spin_lock_irq(&data->misclock);

            if (data->rxring[rx].misc & TSI108_RX_CRC)
                data->stats.rx_crc_errors++;
            if (data->rxring[rx].misc & TSI108_RX_OVER)
                data->stats.rx_fifo_errors++;

            spin_unlock_irq(&data->misclock);

            dev_kfree_skb_any(skb);
            continue;
        }
        if (netif_msg_pktdata(data)) {
            int i;
            printk("%s: Rx Frame contents (%d)\n",
                   dev->name, data->rxring[rx].len);
            for (i = 0; i < data->rxring[rx].len; i++)
                printk(" %2.2x", skb->data[i]);
            printk(".\n");
        }

        /* Hardware wrote the frame directly into skb->data; set the
         * length it reported and hand the frame to the stack.
         */
        skb_put(skb, data->rxring[rx].len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_receive_skb(skb);
    }

    return done;
}
0788 
0789 static int tsi108_refill_rx(struct net_device *dev, int budget)
0790 {
0791     struct tsi108_prv_data *data = netdev_priv(dev);
0792     int done = 0;
0793 
0794     while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
0795         int rx = data->rxhead;
0796         struct sk_buff *skb;
0797 
0798         skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
0799         data->rxskbs[rx] = skb;
0800         if (!skb)
0801             break;
0802 
0803         data->rxring[rx].buf0 = dma_map_single(&data->pdev->dev,
0804                 skb->data, TSI108_RX_SKB_SIZE,
0805                 DMA_FROM_DEVICE);
0806 
0807         /* Sometimes the hardware sets blen to zero after packet
0808          * reception, even though the manual says that it's only ever
0809          * modified by the driver.
0810          */
0811 
0812         data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
0813         data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
0814 
0815         data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
0816         data->rxfree++;
0817         done++;
0818     }
0819 
0820     if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
0821                TSI108_EC_RXSTAT_QUEUE0))
0822         tsi108_restart_rx(data, dev);
0823 
0824     return done;
0825 }
0826 
/* NAPI poll handler: acknowledge RX events, reap up to @budget
 * completed frames, refill the ring, and handle RX error/overrun
 * conditions.  When the budget is not exhausted, completes NAPI and
 * unmasks the RX interrupts that tsi108_rx_int() masked; otherwise
 * leaves rxpending set so the next poll continues reaping.
 */
static int tsi108_poll(struct napi_struct *napi, int budget)
{
    struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
    struct net_device *dev = data->dev;
    u32 estat = TSI_READ(TSI108_EC_RXESTAT);
    u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
    int num_received = 0, num_filled = 0;

    intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
        TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;

    /* Ack the events we are about to service (write-one-to-clear). */
    TSI_WRITE(TSI108_EC_RXESTAT, estat);
    TSI_WRITE(TSI108_EC_INTSTAT, intstat);

    if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
        num_received = tsi108_complete_rx(dev, budget);

    /* This should normally fill no more slots than the number of
     * packets received in tsi108_complete_rx().  The exception
     * is when we previously ran out of memory for RX SKBs.  In that
     * case, it's helpful to obey the budget, not only so that the
     * CPU isn't hogged, but so that memory (which may still be low)
     * is not hogged by one device.
     *
     * A work unit is considered to be two SKBs to allow us to catch
     * up when the ring has shrunk due to out-of-memory but we're
     * still removing the full budget's worth of packets each time.
     */

    if (data->rxfree < TSI108_RXRING_LEN)
        num_filled = tsi108_refill_rx(dev, budget * 2);

    if (intstat & TSI108_INT_RXERROR) {
        u32 err = TSI_READ(TSI108_EC_RXERR);
        TSI_WRITE(TSI108_EC_RXERR, err);

        if (err) {
            if (net_ratelimit())
                printk(KERN_DEBUG "%s: RX error %x\n",
                       dev->name, err);

            /* An error can stall the RX engine; kick it if idle. */
            if (!(TSI_READ(TSI108_EC_RXSTAT) &
                  TSI108_EC_RXSTAT_QUEUE0))
                tsi108_restart_rx(data, dev);
        }
    }

    if (intstat & TSI108_INT_RXOVERRUN) {
        /* misclock guards the software-maintained error counters. */
        spin_lock_irq(&data->misclock);
        data->stats.rx_fifo_errors++;
        spin_unlock_irq(&data->misclock);
    }

    if (num_received < budget) {
        data->rxpending = 0;
        napi_complete_done(napi, num_received);

        /* Re-enable (unmask) the RX interrupts masked by
         * tsi108_rx_int() when this poll was scheduled.
         */
        TSI_WRITE(TSI108_EC_INTMASK,
                     TSI_READ(TSI108_EC_INTMASK)
                     & ~(TSI108_INT_RXQUEUE0
                     | TSI108_INT_RXTHRESH |
                     TSI108_INT_RXOVERRUN |
                     TSI108_INT_RXERROR |
                     TSI108_INT_RXWAIT));
    } else {
        data->rxpending = 1;
    }

    return num_received;
}
0897 
/* Schedule NAPI RX processing from interrupt (or timer) context.  On a
 * successful schedule, the RX interrupt causes are masked (not acked);
 * tsi108_poll() acks them and unmasks when done.
 */
static void tsi108_rx_int(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);

    /* A race could cause dev to already be scheduled, so it's not an
     * error if that happens (and interrupts shouldn't be re-masked,
     * because that can cause harmful races, if poll has already
     * unmasked them but not cleared LINK_STATE_SCHED).
     *
     * This can happen if this code races with tsi108_poll(), which masks
     * the interrupts after tsi108_irq_one() read the mask, but before
     * napi_schedule is called.  It could also happen due to calls
     * from tsi108_check_rxring().
     */

    if (napi_schedule_prep(&data->napi)) {
        /* Mask, rather than ack, the receive interrupts.  The ack
         * will happen in tsi108_poll().
         */

        TSI_WRITE(TSI108_EC_INTMASK,
                     TSI_READ(TSI108_EC_INTMASK) |
                     TSI108_INT_RXQUEUE0
                     | TSI108_INT_RXTHRESH |
                     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
                     TSI108_INT_RXWAIT);
        __napi_schedule(&data->napi);
    } else {
        if (!netif_running(dev)) {
            /* This can happen if an interrupt occurs while the
             * interface is being brought down, as the START
             * bit is cleared before the stop function is called.
             *
             * In this case, the interrupts must be masked, or
             * they will continue indefinitely.
             *
             * There's a race here if the interface is brought down
             * and then up in rapid succession, as the device could
             * be made running after the above check and before
             * the masking below.  This will only happen if the IRQ
             * thread has a lower priority than the task bringing
             * up the interface.  Fixing this race would likely
             * require changes in generic code.
             */

            TSI_WRITE(TSI108_EC_INTMASK,
                         TSI_READ
                         (TSI108_EC_INTMASK) |
                         TSI108_INT_RXQUEUE0 |
                         TSI108_INT_RXTHRESH |
                         TSI108_INT_RXOVERRUN |
                         TSI108_INT_RXERROR |
                         TSI108_INT_RXWAIT);
        }
    }
}
0954 
0955 /* If the RX ring has run out of memory, try periodically
0956  * to allocate some more, as otherwise poll would never
0957  * get called (apart from the initial end-of-queue condition).
0958  *
0959  * This is called once per second (by default) from the thread.
0960  */
0961 
0962 static void tsi108_check_rxring(struct net_device *dev)
0963 {
0964     struct tsi108_prv_data *data = netdev_priv(dev);
0965 
0966     /* A poll is scheduled, as opposed to caling tsi108_refill_rx
0967      * directly, so as to keep the receive path single-threaded
0968      * (and thus not needing a lock).
0969      */
0970 
0971     if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
0972         tsi108_rx_int(dev);
0973 }
0974 
/* Service TX interrupts: acknowledge TX status and interrupt causes,
 * log TX errors (rate-limited), and reap completed descriptors under
 * txlock via tsi108_complete_tx().
 */
static void tsi108_tx_int(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    u32 estat = TSI_READ(TSI108_EC_TXESTAT);

    /* Ack the sampled TX status, then the TX interrupt causes. */
    TSI_WRITE(TSI108_EC_TXESTAT, estat);
    TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
                 TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
    if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
        u32 err = TSI_READ(TSI108_EC_TXERR);
        TSI_WRITE(TSI108_EC_TXERR, err);

        if (err && net_ratelimit())
            printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
    }

    /* Descriptor-complete or end-of-queue: reap finished TX slots. */
    if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
        spin_lock(&data->txlock);
        tsi108_complete_tx(dev);
        spin_unlock(&data->txlock);
    }
}
0997 
0998 
/* Top-level interrupt handler.  Dispatches unmasked TX and RX causes to
 * their service routines and acknowledges SFN and statistics-carry
 * conditions inline.  Returns IRQ_NONE if no cause belongs to us
 * (the line may be shared).
 */
static irqreturn_t tsi108_irq(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct tsi108_prv_data *data = netdev_priv(dev);
    u32 stat = TSI_READ(TSI108_EC_INTSTAT);

    if (!(stat & TSI108_INT_ANY))
        return IRQ_NONE;    /* Not our interrupt */

    /* Drop causes that are currently masked. */
    stat &= ~TSI_READ(TSI108_EC_INTMASK);

    if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
            TSI108_INT_TXERROR))
        tsi108_tx_int(dev);
    if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
            TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
            TSI108_INT_RXERROR))
        tsi108_rx_int(dev);

    if (stat & TSI108_INT_SFN) {
        if (net_ratelimit())
            printk(KERN_DEBUG "%s: SFN error\n", dev->name);
        TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
    }

    if (stat & TSI108_INT_STATCARRY) {
        /* Fold carried hardware counters into the software stats. */
        tsi108_stat_carry(dev);
        TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
    }

    return IRQ_HANDLED;
}
1031 
/* Disable both DMA queues and busy-wait (up to ~10 ms each) for the TX
 * and then the RX engine to go idle.
 *
 * NOTE(review): if the TX engine never idles, the loop simply falls
 * through to the RX wait without logging; only an RX timeout is
 * reported.
 */
static void tsi108_stop_ethernet(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    int i = 1000;
    /* Disable all TX and RX queues ... */
    TSI_WRITE(TSI108_EC_TXCTRL, 0);
    TSI_WRITE(TSI108_EC_RXCTRL, 0);

    /* ...and wait for them to become idle */
    while(i--) {
        if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
            break;
        udelay(10);
    }
    i = 1000;
    while(i--){
        if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
            return;
        udelay(10);
    }
    printk(KERN_ERR "%s function time out\n", __func__);
}
1054 
/* Hard-reset the port: pulse the MAC soft reset, the statistics block,
 * the TX and RX configuration blocks, and finally the MII management
 * interface.  Each reset bit is asserted, held for 100 us, then
 * deasserted.  The register order follows the original bring-up code;
 * do not reorder without consulting the Tsi108 manual.
 */
static void tsi108_reset_ether(struct tsi108_prv_data * data)
{
    TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
    udelay(100);
    TSI_WRITE(TSI108_MAC_CFG1, 0);

    TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
    udelay(100);
    TSI_WRITE(TSI108_EC_PORTCTRL,
                 TSI_READ(TSI108_EC_PORTCTRL) &
                 ~TSI108_EC_PORTCTRL_STATRST);

    TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
    udelay(100);
    TSI_WRITE(TSI108_EC_TXCFG,
                 TSI_READ(TSI108_EC_TXCFG) &
                 ~TSI108_EC_TXCFG_RST);

    TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
    udelay(100);
    TSI_WRITE(TSI108_EC_RXCFG,
                 TSI_READ(TSI108_EC_RXCFG) &
                 ~TSI108_EC_RXCFG_RST);

    /* Reset MII management, then release it and set the low bits to
     * 0x07 -- presumably the management clock divider; verify against
     * the Tsi108 manual.
     */
    TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
                 TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
                 TSI108_MAC_MII_MGMT_RST);
    udelay(100);
    TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
                 (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
                 ~(TSI108_MAC_MII_MGMT_RST |
                   TSI108_MAC_MII_MGMT_CLK)) | 0x07);
}
1088 
/* Read the MAC address out of the chip's two address registers.  If
 * both registers are blank, synthesize a default address (last octet
 * chosen by the PHY number so the two ports differ) and program it
 * back.  Returns 0 on success, -EINVAL if the resulting address is
 * not a valid unicast Ethernet address.
 */
static int tsi108_get_mac(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
    u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
    u8 addr[ETH_ALEN];

    /* Note that the octets are reversed from what the manual says,
     * producing an even weirder ordering...
     */
    if (word2 == 0 && word1 == 0) {
        addr[0] = 0x00;
        addr[1] = 0x06;
        addr[2] = 0xd2;
        addr[3] = 0x00;
        addr[4] = 0x00;
        if (0x8 == data->phy)
            addr[5] = 0x01;
        else
            addr[5] = 0x02;
        eth_hw_addr_set(dev, addr);

        /* Pack the address back into the hardware's byte order. */
        word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);

        word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
            (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);

        TSI_WRITE(TSI108_MAC_ADDR1, word1);
        TSI_WRITE(TSI108_MAC_ADDR2, word2);
    } else {
        /* Unpack: inverse of the packing above. */
        addr[0] = (word2 >> 16) & 0xff;
        addr[1] = (word2 >> 24) & 0xff;
        addr[2] = (word1 >> 0) & 0xff;
        addr[3] = (word1 >> 8) & 0xff;
        addr[4] = (word1 >> 16) & 0xff;
        addr[5] = (word1 >> 24) & 0xff;
        eth_hw_addr_set(dev, addr);
    }

    if (!is_valid_ether_addr(dev->dev_addr)) {
        printk(KERN_ERR
               "%s: Invalid MAC address. word1: %08x, word2: %08x\n",
               dev->name, word1, word2);
        return -EINVAL;
    }

    return 0;
}
1137 
1138 static int tsi108_set_mac(struct net_device *dev, void *addr)
1139 {
1140     struct tsi108_prv_data *data = netdev_priv(dev);
1141     u32 word1, word2;
1142 
1143     if (!is_valid_ether_addr(addr))
1144         return -EADDRNOTAVAIL;
1145 
1146     /* +2 is for the offset of the HW addr type */
1147     eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
1148 
1149     word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
1150 
1151     word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
1152         (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
1153 
1154     spin_lock_irq(&data->misclock);
1155     TSI_WRITE(TSI108_MAC_ADDR1, word1);
1156     TSI_WRITE(TSI108_MAC_ADDR2, word2);
1157     spin_lock(&data->txlock);
1158 
1159     if (data->txfree && data->link_up)
1160         netif_wake_queue(dev);
1161 
1162     spin_unlock(&data->txlock);
1163     spin_unlock_irq(&data->misclock);
1164     return 0;
1165 }
1166 
/* Protected by dev->xmit_lock. */
/* Program the receive filter.  Promiscuous mode disables the hash
 * filters and accepts all unicast and multicast frames; otherwise a
 * multicast hash table is built from bits of each address CRC and
 * written to the hardware (16 words via the auto-incrementing hash
 * address register).
 */
static void tsi108_set_rx_mode(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);
    u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);

    if (dev->flags & IFF_PROMISC) {
        rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
        rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
        goto out;
    }

    rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);

    if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
        int i;
        struct netdev_hw_addr *ha;
        rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;

        memset(data->mc_hash, 0, sizeof(data->mc_hash));

        netdev_for_each_mc_addr(ha, dev) {
            u32 hash, crc;

            /* Top 9 bits of the CRC select the hash bit. */
            crc = ether_crc(6, ha->addr);
            hash = crc >> 23;
            __set_bit(hash, &data->mc_hash[0]);
        }

        TSI_WRITE(TSI108_EC_HASHADDR,
                     TSI108_EC_HASHADDR_AUTOINC |
                     TSI108_EC_HASHADDR_MCAST);

        for (i = 0; i < 16; i++) {
            /* The manual says that the hardware may drop
             * back-to-back writes to the data register.
             */
            udelay(1);
            TSI_WRITE(TSI108_EC_HASHDATA,
                         data->mc_hash[i]);
        }
    }

      out:
    TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
}
1213 
1214 static void tsi108_init_phy(struct net_device *dev)
1215 {
1216     struct tsi108_prv_data *data = netdev_priv(dev);
1217     u32 i = 0;
1218     u16 phyval = 0;
1219     unsigned long flags;
1220 
1221     spin_lock_irqsave(&phy_lock, flags);
1222 
1223     tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
1224     while (--i) {
1225         if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
1226             break;
1227         udelay(10);
1228     }
1229     if (i == 0)
1230         printk(KERN_ERR "%s function time out\n", __func__);
1231 
1232     if (data->phy_type == TSI108_PHY_BCM54XX) {
1233         tsi108_write_mii(data, 0x09, 0x0300);
1234         tsi108_write_mii(data, 0x10, 0x1020);
1235         tsi108_write_mii(data, 0x1c, 0x8c00);
1236     }
1237 
1238     tsi108_write_mii(data,
1239              MII_BMCR,
1240              BMCR_ANENABLE | BMCR_ANRESTART);
1241     while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
1242         cpu_relax();
1243 
1244     /* Set G/MII mode and receive clock select in TBI control #2.  The
1245      * second port won't work if this isn't done, even though we don't
1246      * use TBI mode.
1247      */
1248 
1249     tsi108_write_tbi(data, 0x11, 0x30);
1250 
1251     /* FIXME: It seems to take more than 2 back-to-back reads to the
1252      * PHY_STAT register before the link up status bit is set.
1253      */
1254 
1255     data->link_up = 0;
1256 
1257     while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
1258          BMSR_LSTATUS)) {
1259         if (i++ > (MII_READ_DELAY / 10)) {
1260             break;
1261         }
1262         spin_unlock_irqrestore(&phy_lock, flags);
1263         msleep(10);
1264         spin_lock_irqsave(&phy_lock, flags);
1265     }
1266 
1267     data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1268     printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
1269     data->phy_ok = 1;
1270     data->init_media = 1;
1271     spin_unlock_irqrestore(&phy_lock, flags);
1272 }
1273 
1274 static void tsi108_kill_phy(struct net_device *dev)
1275 {
1276     struct tsi108_prv_data *data = netdev_priv(dev);
1277     unsigned long flags;
1278 
1279     spin_lock_irqsave(&phy_lock, flags);
1280     tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
1281     data->phy_ok = 0;
1282     spin_unlock_irqrestore(&phy_lock, flags);
1283 }
1284 
1285 static int tsi108_open(struct net_device *dev)
1286 {
1287     int i;
1288     struct tsi108_prv_data *data = netdev_priv(dev);
1289     unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
1290     unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
1291 
1292     i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
1293     if (i != 0) {
1294         printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
1295                data->id, data->irq_num);
1296         return i;
1297     } else {
1298         dev->irq = data->irq_num;
1299         printk(KERN_NOTICE
1300                "tsi108_open : Port %d Assigned IRQ %d to %s\n",
1301                data->id, dev->irq, dev->name);
1302     }
1303 
1304     data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
1305                       &data->rxdma, GFP_KERNEL);
1306     if (!data->rxring)
1307         return -ENOMEM;
1308 
1309     data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
1310                       &data->txdma, GFP_KERNEL);
1311     if (!data->txring) {
1312         dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
1313                     data->rxdma);
1314         return -ENOMEM;
1315     }
1316 
1317     for (i = 0; i < TSI108_RXRING_LEN; i++) {
1318         data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
1319         data->rxring[i].blen = TSI108_RXBUF_SIZE;
1320         data->rxring[i].vlan = 0;
1321     }
1322 
1323     data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
1324 
1325     data->rxtail = 0;
1326     data->rxhead = 0;
1327 
1328     for (i = 0; i < TSI108_RXRING_LEN; i++) {
1329         struct sk_buff *skb;
1330 
1331         skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
1332         if (!skb) {
1333             /* Bah.  No memory for now, but maybe we'll get
1334              * some more later.
1335              * For now, we'll live with the smaller ring.
1336              */
1337             printk(KERN_WARNING
1338                    "%s: Could only allocate %d receive skb(s).\n",
1339                    dev->name, i);
1340             data->rxhead = i;
1341             break;
1342         }
1343 
1344         data->rxskbs[i] = skb;
1345         data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1346         data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1347     }
1348 
1349     data->rxfree = i;
1350     TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
1351 
1352     for (i = 0; i < TSI108_TXRING_LEN; i++) {
1353         data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
1354         data->txring[i].misc = 0;
1355     }
1356 
1357     data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
1358     data->txtail = 0;
1359     data->txhead = 0;
1360     data->txfree = TSI108_TXRING_LEN;
1361     TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
1362     tsi108_init_phy(dev);
1363 
1364     napi_enable(&data->napi);
1365 
1366     timer_setup(&data->timer, tsi108_timed_checker, 0);
1367     mod_timer(&data->timer, jiffies + 1);
1368 
1369     tsi108_restart_rx(data, dev);
1370 
1371     TSI_WRITE(TSI108_EC_INTSTAT, ~0);
1372 
1373     TSI_WRITE(TSI108_EC_INTMASK,
1374                  ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
1375                    TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
1376                    TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
1377                    TSI108_INT_SFN | TSI108_INT_STATCARRY));
1378 
1379     TSI_WRITE(TSI108_MAC_CFG1,
1380                  TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
1381     netif_start_queue(dev);
1382     return 0;
1383 }
1384 
/* ndo_stop handler: quiesce the queue, NAPI and the checker timer, stop
 * the MAC, power down the PHY, mask all interrupts, then drop every
 * queued skb and free the IRQ and both descriptor rings.  Inverse of
 * tsi108_open().
 */
static int tsi108_close(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);

    netif_stop_queue(dev);
    napi_disable(&data->napi);

    del_timer_sync(&data->timer);

    tsi108_stop_ethernet(dev);
    tsi108_kill_phy(dev);
    TSI_WRITE(TSI108_EC_INTMASK, ~0);
    TSI_WRITE(TSI108_MAC_CFG1, 0);

    /* Check for any pending TX packets, and drop them. */

    while (!data->txfree || data->txhead != data->txtail) {
        int tx = data->txtail;
        struct sk_buff *skb;
        skb = data->txskbs[tx];
        data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
        data->txfree++;
        dev_kfree_skb(skb);
    }

    free_irq(data->irq_num, dev);

    /* Discard the RX ring. */

    while (data->rxfree) {
        int rx = data->rxtail;
        struct sk_buff *skb;

        skb = data->rxskbs[rx];
        data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
        data->rxfree--;
        dev_kfree_skb(skb);
    }

    dma_free_coherent(&data->pdev->dev,
                TSI108_RXRING_LEN * sizeof(rx_desc),
                data->rxring, data->rxdma);
    dma_free_coherent(&data->pdev->dev,
                TSI108_TXRING_LEN * sizeof(tx_desc),
                data->txring, data->txdma);

    return 0;
}
1433 
/* One-time MAC/engine register setup: frame padding and CRC, TX FIFO
 * thresholds, statistics carry masks (only the counters the driver
 * consumes are unmasked), queue/buffer configuration with byte/word
 * swapping and the PBM port number, and finally mask all interrupts.
 * Interrupts are unmasked later, in tsi108_open().
 */
static void tsi108_init_mac(struct net_device *dev)
{
    struct tsi108_prv_data *data = netdev_priv(dev);

    TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
                 TSI108_MAC_CFG2_PADCRC);

    TSI_WRITE(TSI108_EC_TXTHRESH,
                 (192 << TSI108_EC_TXTHRESH_STARTFILL) |
                 (192 << TSI108_EC_TXTHRESH_STOPFILL));

    TSI_WRITE(TSI108_STAT_CARRYMASK1,
                 ~(TSI108_STAT_CARRY1_RXBYTES |
                   TSI108_STAT_CARRY1_RXPKTS |
                   TSI108_STAT_CARRY1_RXFCS |
                   TSI108_STAT_CARRY1_RXMCAST |
                   TSI108_STAT_CARRY1_RXALIGN |
                   TSI108_STAT_CARRY1_RXLENGTH |
                   TSI108_STAT_CARRY1_RXRUNT |
                   TSI108_STAT_CARRY1_RXJUMBO |
                   TSI108_STAT_CARRY1_RXFRAG |
                   TSI108_STAT_CARRY1_RXJABBER |
                   TSI108_STAT_CARRY1_RXDROP));

    TSI_WRITE(TSI108_STAT_CARRYMASK2,
                 ~(TSI108_STAT_CARRY2_TXBYTES |
                   TSI108_STAT_CARRY2_TXPKTS |
                   TSI108_STAT_CARRY2_TXEXDEF |
                   TSI108_STAT_CARRY2_TXEXCOL |
                   TSI108_STAT_CARRY2_TXTCOL |
                   TSI108_STAT_CARRY2_TXPAUSE));

    TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
    TSI_WRITE(TSI108_MAC_CFG1, 0);

    TSI_WRITE(TSI108_EC_RXCFG,
                 TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);

    TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
                 TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
                 TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
                        TSI108_EC_TXQ_CFG_SFNPORT));

    TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
                 TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
                 TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
                        TSI108_EC_RXQ_CFG_SFNPORT));

    TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
                 TSI108_EC_TXQ_BUFCFG_BURST256 |
                 TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
                        TSI108_EC_TXQ_BUFCFG_SFNPORT));

    TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
                 TSI108_EC_RXQ_BUFCFG_BURST256 |
                 TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
                        TSI108_EC_RXQ_BUFCFG_SFNPORT));

    TSI_WRITE(TSI108_EC_INTMASK, ~0);
}
1494 
1495 static int tsi108_get_link_ksettings(struct net_device *dev,
1496                      struct ethtool_link_ksettings *cmd)
1497 {
1498     struct tsi108_prv_data *data = netdev_priv(dev);
1499     unsigned long flags;
1500 
1501     spin_lock_irqsave(&data->txlock, flags);
1502     mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
1503     spin_unlock_irqrestore(&data->txlock, flags);
1504 
1505     return 0;
1506 }
1507 
1508 static int tsi108_set_link_ksettings(struct net_device *dev,
1509                      const struct ethtool_link_ksettings *cmd)
1510 {
1511     struct tsi108_prv_data *data = netdev_priv(dev);
1512     unsigned long flags;
1513     int rc;
1514 
1515     spin_lock_irqsave(&data->txlock, flags);
1516     rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
1517     spin_unlock_irqrestore(&data->txlock, flags);
1518 
1519     return rc;
1520 }
1521 
1522 static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1523 {
1524     struct tsi108_prv_data *data = netdev_priv(dev);
1525     if (!netif_running(dev))
1526         return -EINVAL;
1527     return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
1528 }
1529 
/* ethtool operations: link state and settings via the MII helpers. */
static const struct ethtool_ops tsi108_ethtool_ops = {
    .get_link   = ethtool_op_get_link,
    .get_link_ksettings = tsi108_get_link_ksettings,
    .set_link_ksettings = tsi108_set_link_ksettings,
};
1535 
/* Net-device operations wired up in tsi108_init_one(). */
static const struct net_device_ops tsi108_netdev_ops = {
    .ndo_open       = tsi108_open,
    .ndo_stop       = tsi108_close,
    .ndo_start_xmit     = tsi108_send_packet,
    .ndo_set_rx_mode    = tsi108_set_rx_mode,
    .ndo_get_stats      = tsi108_get_stats,
    .ndo_eth_ioctl      = tsi108_do_ioctl,
    .ndo_set_mac_address    = tsi108_set_mac,
    .ndo_validate_addr  = eth_validate_addr,
};
1546 
/* Platform-device probe: allocate the net_device, map the MAC and PHY
 * register windows from the platform data, set up the MII interface and
 * NAPI, reset the hardware, read (or synthesize) the MAC address, and
 * register the netdev.  Errors unwind via the goto chain at the bottom.
 */
static int
tsi108_init_one(struct platform_device *pdev)
{
    struct net_device *dev = NULL;
    struct tsi108_prv_data *data = NULL;
    hw_info *einfo;
    int err = 0;

    einfo = dev_get_platdata(&pdev->dev);

    if (NULL == einfo) {
        printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
               pdev->id);
        return -ENODEV;
    }

    /* Create an ethernet device instance */

    dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
    if (!dev)
        return -ENOMEM;

    printk("tsi108_eth%d: probe...\n", pdev->id);
    data = netdev_priv(dev);
    data->dev = dev;
    data->pdev = pdev;

    pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
            pdev->id, einfo->regs, einfo->phyregs,
            einfo->phy, einfo->irq_num);

    /* Map the MAC and PHY register windows (0x400 bytes each). */
    data->regs = ioremap(einfo->regs, 0x400);
    if (NULL == data->regs) {
        err = -ENOMEM;
        goto regs_fail;
    }

    data->phyregs = ioremap(einfo->phyregs, 0x400);
    if (NULL == data->phyregs) {
        err = -ENOMEM;
        goto phyregs_fail;
    }
/* MII setup */
    data->mii_if.dev = dev;
    data->mii_if.mdio_read = tsi108_mdio_read;
    data->mii_if.mdio_write = tsi108_mdio_write;
    data->mii_if.phy_id = einfo->phy;
    data->mii_if.phy_id_mask = 0x1f;
    data->mii_if.reg_num_mask = 0x1f;

    data->phy = einfo->phy;
    data->phy_type = einfo->phy_type;
    data->irq_num = einfo->irq_num;
    data->id = pdev->id;
    netif_napi_add(dev, &data->napi, tsi108_poll, 64);
    dev->netdev_ops = &tsi108_netdev_ops;
    dev->ethtool_ops = &tsi108_ethtool_ops;

    /* Apparently, the Linux networking code won't use scatter-gather
     * if the hardware doesn't do checksums.  However, it's faster
     * to checksum in place and use SG, as (among other reasons)
     * the cache won't be dirtied (which then has to be flushed
     * before DMA).  The checksumming is done by the driver (via
     * a new function skb_csum_dev() in net/core/skbuff.c).
     */

    dev->features = NETIF_F_HIGHDMA;

    spin_lock_init(&data->txlock);
    spin_lock_init(&data->misclock);

    tsi108_reset_ether(data);
    tsi108_kill_phy(dev);

    if ((err = tsi108_get_mac(dev)) != 0) {
        printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
               dev->name);
        goto register_fail;
    }

    tsi108_init_mac(dev);
    err = register_netdev(dev);
    if (err) {
        printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                dev->name);
        goto register_fail;
    }

    platform_set_drvdata(pdev, dev);
    printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n",
           dev->name, dev->dev_addr);
#ifdef DEBUG
    data->msg_enable = DEBUG;
    dump_eth_one(dev);
#endif

    return 0;

register_fail:
    iounmap(data->phyregs);

phyregs_fail:
    iounmap(data->regs);

regs_fail:
    free_netdev(dev);
    return err;
}
1655 
1656 /* There's no way to either get interrupts from the PHY when
1657  * something changes, or to have the Tsi108 automatically communicate
1658  * with the PHY to reconfigure itself.
1659  *
1660  * Thus, we have to do it using a timer.
1661  */
1662 
1663 static void tsi108_timed_checker(struct timer_list *t)
1664 {
1665     struct tsi108_prv_data *data = from_timer(data, t, timer);
1666     struct net_device *dev = data->dev;
1667 
1668     tsi108_check_phy(dev);
1669     tsi108_check_rxring(dev);
1670     mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
1671 }
1672 
1673 static int tsi108_ether_remove(struct platform_device *pdev)
1674 {
1675     struct net_device *dev = platform_get_drvdata(pdev);
1676     struct tsi108_prv_data *priv = netdev_priv(dev);
1677 
1678     unregister_netdev(dev);
1679     tsi108_stop_ethernet(dev);
1680     iounmap(priv->regs);
1681     iounmap(priv->phyregs);
1682     free_netdev(dev);
1683 
1684     return 0;
1685 }
/* Register the platform driver and declare module metadata. */
module_platform_driver(tsi_eth_driver);

MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tsi-ethernet");