// SPDX-License-Identifier: GPL-2.0-only
/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * https://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include "jme.h"

static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
    "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
    "Do not use external plug signal for pseudo hot-plug.");

static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
    struct jme_adapter *jme = netdev_priv(netdev);
    int i, val, again = (reg == MII_BMSR) ? 1 : 0;

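    /*
     * The link-status bit in MII_BMSR is latched, so when reading BMSR
     * the transaction is issued twice (via 'again') to return the
     * current link state rather than a stale latched value.
     */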
read_again:
    jwrite32(jme, JME_SMI, SMI_OP_REQ |
                smi_phy_addr(phy) |
                smi_reg_addr(reg));

    wmb();
    for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
        udelay(20);
        val = jread32(jme, JME_SMI);
        if ((val & SMI_OP_REQ) == 0)
            break;
    }

    if (i == 0) {
        pr_err("phy(%d) read timeout : %d\n", phy, reg);
        return 0;
    }

    if (again--)
        goto read_again;

    return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}

static void
jme_mdio_write(struct net_device *netdev,
                int phy, int reg, int val)
{
    struct jme_adapter *jme = netdev_priv(netdev);
    int i;

    jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
        ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
        smi_phy_addr(phy) | smi_reg_addr(reg));

    wmb();
    for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
        udelay(20);
        if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
            break;
    }

    if (i == 0)
        pr_err("phy(%d) write timeout : %d\n", phy, reg);
}

static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
    u32 val;

    jme_mdio_write(jme->dev,
            jme->mii_if.phy_id,
            MII_ADVERTISE, ADVERTISE_ALL |
            ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

    if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
        jme_mdio_write(jme->dev,
                jme->mii_if.phy_id,
                MII_CTRL1000,
                ADVERTISE_1000FULL | ADVERTISE_1000HALF);

    val = jme_mdio_read(jme->dev,
                jme->mii_if.phy_id,
                MII_BMCR);

    jme_mdio_write(jme->dev,
            jme->mii_if.phy_id,
            MII_BMCR, val | BMCR_RESET);
}

static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
               const u32 *mask, u32 crc, int fnr)
{
    int i;

    /*
     * Setup CRC pattern
     */
    jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
    wmb();
    jwrite32(jme, JME_WFODP, crc);
    wmb();

    /*
     * Setup Mask
     */
    for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
        jwrite32(jme, JME_WFOI,
                ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                (fnr & WFOI_FRAME_SEL));
        wmb();
        jwrite32(jme, JME_WFODP, mask[i]);
        wmb();
    }
}

static inline void
jme_mac_rxclk_off(struct jme_adapter *jme)
{
    jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
    jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_rxclk_on(struct jme_adapter *jme)
{
    jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
    jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_mac_txclk_off(struct jme_adapter *jme)
{
    jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
    jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_mac_txclk_on(struct jme_adapter *jme)
{
    u32 speed = jme->reg_ghc & GHC_SPEED;
    if (speed == GHC_SPEED_1000M)
        jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
    else
        jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
    jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
    jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
    jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_reset_250A2_workaround(struct jme_adapter *jme)
{
    jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                 GPREG1_RSSPATCH);
    jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
}

static inline void
jme_assert_ghc_reset(struct jme_adapter *jme)
{
    jme->reg_ghc |= GHC_SWRST;
    jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static inline void
jme_clear_ghc_reset(struct jme_adapter *jme)
{
    jme->reg_ghc &= ~GHC_SWRST;
    jwrite32f(jme, JME_GHC, jme->reg_ghc);
}

static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
    static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
    u32 crc = 0xCDCDCDCD;
    u32 gpreg0;
    int i;

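    /*
     * The RX/TX MAC clocks are toggled on around the software reset
     * below, presumably so the reset can propagate through both clock
     * domains before the clocks are gated off again.
     */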
    jme_reset_ghc_speed(jme);
    jme_reset_250A2_workaround(jme);

    jme_mac_rxclk_on(jme);
    jme_mac_txclk_on(jme);
    udelay(1);
    jme_assert_ghc_reset(jme);
    udelay(1);
    jme_mac_rxclk_off(jme);
    jme_mac_txclk_off(jme);
    udelay(1);
    jme_clear_ghc_reset(jme);
    udelay(1);
    jme_mac_rxclk_on(jme);
    jme_mac_txclk_on(jme);
    udelay(1);
    jme_mac_rxclk_off(jme);
    jme_mac_txclk_off(jme);

    jwrite32(jme, JME_RXDBA_LO, 0x00000000);
    jwrite32(jme, JME_RXDBA_HI, 0x00000000);
    jwrite32(jme, JME_RXQDC, 0x00000000);
    jwrite32(jme, JME_RXNDA, 0x00000000);
    jwrite32(jme, JME_TXDBA_LO, 0x00000000);
    jwrite32(jme, JME_TXDBA_HI, 0x00000000);
    jwrite32(jme, JME_TXQDC, 0x00000000);
    jwrite32(jme, JME_TXNDA, 0x00000000);

    jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
    jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
    for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
        jme_setup_wakeup_frame(jme, mask, crc, i);
    if (jme->fpgaver)
        gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
    else
        gpreg0 = GPREG0_DEFAULT;
    jwrite32(jme, JME_GPREG0, gpreg0);
}

static inline void
jme_clear_pm_enable_wol(struct jme_adapter *jme)
{
    jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}

static inline void
jme_clear_pm_disable_wol(struct jme_adapter *jme)
{
    jwrite32(jme, JME_PMCS, PMCS_STMASK);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
    u32 val;
    int i;

    val = jread32(jme, JME_SMBCSR);

    if (val & SMBCSR_EEPROMD) {
        val |= SMBCSR_CNACK;
        jwrite32(jme, JME_SMBCSR, val);
        val |= SMBCSR_RELOAD;
        jwrite32(jme, JME_SMBCSR, val);
        mdelay(12);

        for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
            mdelay(1);
            if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
                break;
        }

        if (i == 0) {
            pr_err("eeprom reload timeout\n");
            return -EIO;
        }
    }

    return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
    struct jme_adapter *jme = netdev_priv(netdev);
    unsigned char macaddr[ETH_ALEN];
    u32 val;

    spin_lock_bh(&jme->macaddr_lock);
    val = jread32(jme, JME_RXUMA_LO);
    macaddr[0] = (val >>  0) & 0xFF;
    macaddr[1] = (val >>  8) & 0xFF;
    macaddr[2] = (val >> 16) & 0xFF;
    macaddr[3] = (val >> 24) & 0xFF;
    val = jread32(jme, JME_RXUMA_HI);
    macaddr[4] = (val >>  0) & 0xFF;
    macaddr[5] = (val >>  8) & 0xFF;
    eth_hw_addr_set(netdev, macaddr);
    spin_unlock_bh(&jme->macaddr_lock);
}

static inline void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
    switch (p) {
    case PCC_OFF:
        jwrite32(jme, JME_PCCRX0,
            ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
            ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
        break;
    case PCC_P1:
        jwrite32(jme, JME_PCCRX0,
            ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
            ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
        break;
    case PCC_P2:
        jwrite32(jme, JME_PCCRX0,
            ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
            ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
        break;
    case PCC_P3:
        jwrite32(jme, JME_PCCRX0,
            ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
            ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
        break;
    default:
        break;
    }
    wmb();

    if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
        netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
    register struct dynpcc_info *dpi = &(jme->dpi);

    jme_set_rx_pcc(jme, PCC_P1);
    dpi->cur        = PCC_P1;
    dpi->attempt        = PCC_P1;
    dpi->cnt        = 0;

    jwrite32(jme, JME_PCCTX,
            ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
            ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
            PCCTXQ0_EN
        );

    /*
     * Enable Interrupts
     */
    jwrite32(jme, JME_IENS, INTR_ENABLE);
}

static inline void
jme_stop_irq(struct jme_adapter *jme)
{
    /*
     * Disable Interrupts
     */
    jwrite32f(jme, JME_IENC, INTR_ENABLE);
}

static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
    u32 phylink, bmsr;

    phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
    bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
    if (bmsr & BMSR_ANCOMP)
        phylink |= PHY_LINK_AUTONEG_COMPLETE;

    return phylink;
}

static inline void
jme_set_phyfifo_5level(struct jme_adapter *jme)
{
    jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}

static inline void
jme_set_phyfifo_8level(struct jme_adapter *jme)
{
    jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}

static int
jme_check_link(struct net_device *netdev, int testonly)
{
    struct jme_adapter *jme = netdev_priv(netdev);
    u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
    char linkmsg[64];
    int rc = 0;

    linkmsg[0] = '\0';

    if (jme->fpgaver)
        phylink = jme_linkstat_from_phy(jme);
    else
        phylink = jread32(jme, JME_PHY_LINK);

    if (phylink & PHY_LINK_UP) {
        if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
            /*
             * If AN was not enabled,
             * speed/duplex info must be obtained from SMI
             */
            phylink = PHY_LINK_UP;

            bmcr = jme_mdio_read(jme->dev,
                        jme->mii_if.phy_id,
                        MII_BMCR);

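            /*
             * Decode the forced speed from BMCR: SPEED1000 set with
             * SPEED100 clear selects 1000M, SPEED100 set selects 100M,
             * otherwise 10M.
             */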
            phylink |= ((bmcr & BMCR_SPEED1000) &&
                    (bmcr & BMCR_SPEED100) == 0) ?
                    PHY_LINK_SPEED_1000M :
                    (bmcr & BMCR_SPEED100) ?
                    PHY_LINK_SPEED_100M :
                    PHY_LINK_SPEED_10M;

            phylink |= (bmcr & BMCR_FULLDPLX) ?
                     PHY_LINK_DUPLEX : 0;

            strcat(linkmsg, "Forced: ");
        } else {
            /*
             * Keep polling until speed/duplex resolution completes
             */
            while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                --cnt) {

                udelay(1);

                if (jme->fpgaver)
                    phylink = jme_linkstat_from_phy(jme);
                else
                    phylink = jread32(jme, JME_PHY_LINK);
            }
            if (!cnt)
                pr_err("Waiting speed resolve timeout\n");

            strcat(linkmsg, "ANed: ");
        }

        if (jme->phylink == phylink) {
            rc = 1;
            goto out;
        }
        if (testonly)
            goto out;

        jme->phylink = phylink;

        /*
         * The speed/duplex setting of jme->reg_ghc has already been
         * cleared by jme_reset_mac_processor()
         */
        switch (phylink & PHY_LINK_SPEED_MASK) {
        case PHY_LINK_SPEED_10M:
            jme->reg_ghc |= GHC_SPEED_10M;
            strcat(linkmsg, "10 Mbps, ");
            break;
        case PHY_LINK_SPEED_100M:
            jme->reg_ghc |= GHC_SPEED_100M;
            strcat(linkmsg, "100 Mbps, ");
            break;
        case PHY_LINK_SPEED_1000M:
            jme->reg_ghc |= GHC_SPEED_1000M;
            strcat(linkmsg, "1000 Mbps, ");
            break;
        default:
            break;
        }

        if (phylink & PHY_LINK_DUPLEX) {
            jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
            jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
            jme->reg_ghc |= GHC_DPX;
        } else {
            jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                        TXMCS_BACKOFF |
                        TXMCS_CARRIERSENSE |
                        TXMCS_COLLISION);
            jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
        }

        jwrite32(jme, JME_GHC, jme->reg_ghc);

        if (is_buggy250(jme->pdev->device, jme->chiprev)) {
            jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                         GPREG1_RSSPATCH);
            if (!(phylink & PHY_LINK_DUPLEX))
                jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
            switch (phylink & PHY_LINK_SPEED_MASK) {
            case PHY_LINK_SPEED_10M:
                jme_set_phyfifo_8level(jme);
                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                break;
            case PHY_LINK_SPEED_100M:
                jme_set_phyfifo_5level(jme);
                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                break;
            case PHY_LINK_SPEED_1000M:
                jme_set_phyfifo_8level(jme);
                break;
            default:
                break;
            }
        }
        jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

        strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                    "Full-Duplex, " :
                    "Half-Duplex, ");
        strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
                    "MDI-X" :
                    "MDI");
        netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
        netif_carrier_on(netdev);
    } else {
        if (testonly)
            goto out;

        netif_info(jme, link, jme->dev, "Link is down\n");
        jme->phylink = 0;
        netif_carrier_off(netdev);
    }

out:
    return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
    struct jme_ring *txring = &(jme->txring[0]);

    txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                   &(txring->dmaalloc),
                   GFP_ATOMIC);

    if (!txring->alloc)
        goto err_set_null;

    /*
     * Align descriptors to a 16-byte boundary
     */
    txring->desc        = (void *)ALIGN((unsigned long)(txring->alloc),
                        RING_DESC_ALIGN);
    txring->dma     = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
    txring->next_to_use = 0;
    atomic_set(&txring->next_to_clean, 0);
    atomic_set(&txring->nr_free, jme->tx_ring_size);

    txring->bufinf      = kcalloc(jme->tx_ring_size,
                        sizeof(struct jme_buffer_info),
                        GFP_ATOMIC);
    if (unlikely(!(txring->bufinf)))
        goto err_free_txring;

    return 0;

err_free_txring:
    dma_free_coherent(&(jme->pdev->dev),
              TX_RING_ALLOC_SIZE(jme->tx_ring_size),
              txring->alloc,
              txring->dmaalloc);

err_set_null:
    txring->desc = NULL;
    txring->dmaalloc = 0;
    txring->dma = 0;
    txring->bufinf = NULL;

    return -ENOMEM;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
    int i;
    struct jme_ring *txring = &(jme->txring[0]);
    struct jme_buffer_info *txbi;

    if (txring->alloc) {
        if (txring->bufinf) {
            for (i = 0 ; i < jme->tx_ring_size ; ++i) {
                txbi = txring->bufinf + i;
                if (txbi->skb) {
                    dev_kfree_skb(txbi->skb);
                    txbi->skb = NULL;
                }
                txbi->mapping       = 0;
                txbi->len       = 0;
                txbi->nr_desc       = 0;
                txbi->start_xmit    = 0;
            }
            kfree(txring->bufinf);
        }

        dma_free_coherent(&(jme->pdev->dev),
                  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
                  txring->alloc,
                  txring->dmaalloc);

        txring->alloc       = NULL;
        txring->desc        = NULL;
        txring->dmaalloc    = 0;
        txring->dma     = 0;
        txring->bufinf      = NULL;
    }
    txring->next_to_use = 0;
    atomic_set(&txring->next_to_clean, 0);
    atomic_set(&txring->nr_free, 0);
}

static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
    /*
     * Select Queue 0
     */
    jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
    wmb();

    /*
     * Setup TX Queue 0 DMA Base Address
     */
    jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
    jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
    jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

    /*
     * Setup TX Descriptor Count
     */
    jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

    /*
     * Enable TX Engine
     */
    wmb();
    jwrite32f(jme, JME_TXCS, jme->reg_txcs |
                TXCS_SELECT_QUEUE0 |
                TXCS_ENABLE);

    /*
     * Start clock for TX MAC Processor
     */
    jme_mac_txclk_on(jme);
}

static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
    int i;
    u32 val;

    /*
     * Disable TX Engine
     */
    jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
    wmb();

    val = jread32(jme, JME_TXCS);
    for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
        mdelay(1);
        val = jread32(jme, JME_TXCS);
        rmb();
    }

    if (!i)
        pr_err("Disable TX engine timeout\n");

    /*
     * Stop clock for TX MAC Processor
     */
    jme_mac_txclk_off(jme);
}

static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    register struct rxdesc *rxdesc = rxring->desc;
    struct jme_buffer_info *rxbi = rxring->bufinf;
    rxdesc += i;
    rxbi += i;

    rxdesc->dw[0] = 0;
    rxdesc->dw[1] = 0;
    rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
    rxdesc->desc1.bufaddrl  = cpu_to_le32(
                    (__u64)rxbi->mapping & 0xFFFFFFFFUL);
    rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
    if (jme->dev->features & NETIF_F_HIGHDMA)
        rxdesc->desc1.flags = RXFLAG_64BIT;
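    /*
     * Make sure the buffer address and length above are visible to the
     * device before the OWN bit hands the descriptor back to hardware.
     */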
    wmb();
    rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    struct jme_buffer_info *rxbi = rxring->bufinf + i;
    struct sk_buff *skb;
    dma_addr_t mapping;

    skb = netdev_alloc_skb(jme->dev,
        jme->dev->mtu + RX_EXTRA_LEN);
    if (unlikely(!skb))
        return -ENOMEM;

    mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
                   offset_in_page(skb->data), skb_tailroom(skb),
                   DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
        dev_kfree_skb(skb);
        return -ENOMEM;
    }

    if (likely(rxbi->mapping))
        dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
                   DMA_FROM_DEVICE);

    rxbi->skb = skb;
    rxbi->len = skb_tailroom(skb);
    rxbi->mapping = mapping;
    return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    struct jme_buffer_info *rxbi = rxring->bufinf;
    rxbi += i;

    if (rxbi->skb) {
        dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
                   DMA_FROM_DEVICE);
        dev_kfree_skb(rxbi->skb);
        rxbi->skb = NULL;
        rxbi->mapping = 0;
        rxbi->len = 0;
    }
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
    int i;
    struct jme_ring *rxring = &(jme->rxring[0]);

    if (rxring->alloc) {
        if (rxring->bufinf) {
            for (i = 0 ; i < jme->rx_ring_size ; ++i)
                jme_free_rx_buf(jme, i);
            kfree(rxring->bufinf);
        }

        dma_free_coherent(&(jme->pdev->dev),
                  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                  rxring->alloc,
                  rxring->dmaalloc);
        rxring->alloc    = NULL;
        rxring->desc     = NULL;
        rxring->dmaalloc = 0;
        rxring->dma      = 0;
        rxring->bufinf   = NULL;
    }
    rxring->next_to_use   = 0;
    atomic_set(&rxring->next_to_clean, 0);
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
    int i;
    struct jme_ring *rxring = &(jme->rxring[0]);

    rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
                   &(rxring->dmaalloc),
                   GFP_ATOMIC);
    if (!rxring->alloc)
        goto err_set_null;

    /*
     * Align descriptors to a 16-byte boundary
     */
    rxring->desc        = (void *)ALIGN((unsigned long)(rxring->alloc),
                        RING_DESC_ALIGN);
    rxring->dma     = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
    rxring->next_to_use = 0;
    atomic_set(&rxring->next_to_clean, 0);

    rxring->bufinf      = kcalloc(jme->rx_ring_size,
                        sizeof(struct jme_buffer_info),
                        GFP_ATOMIC);
    if (unlikely(!(rxring->bufinf)))
        goto err_free_rxring;

    /*
     * Initialize Receive Descriptors
     */
    for (i = 0 ; i < jme->rx_ring_size ; ++i) {
        if (unlikely(jme_make_new_rx_buf(jme, i))) {
            jme_free_rx_resources(jme);
            return -ENOMEM;
        }

        jme_set_clean_rxdesc(jme, i);
    }

    return 0;

err_free_rxring:
    dma_free_coherent(&(jme->pdev->dev),
              RX_RING_ALLOC_SIZE(jme->rx_ring_size),
              rxring->alloc,
              rxring->dmaalloc);
err_set_null:
    rxring->desc = NULL;
    rxring->dmaalloc = 0;
    rxring->dma = 0;
    rxring->bufinf = NULL;

    return -ENOMEM;
}

static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
    /*
     * Select Queue 0
     */
    jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                RXCS_QUEUESEL_Q0);
    wmb();

    /*
     * Setup RX DMA Base Address
     */
    jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
    jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
    jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

    /*
     * Setup RX Descriptor Count
     */
    jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

    /*
     * Setup Unicast Filter
     */
    jme_set_unicastaddr(jme->dev);
    jme_set_multi(jme->dev);

    /*
     * Enable RX Engine
     */
    wmb();
    jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
                RXCS_QUEUESEL_Q0 |
                RXCS_ENABLE |
                RXCS_QST);

    /*
     * Start clock for RX MAC Processor
     */
    jme_mac_rxclk_on(jme);
}

static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
    /*
     * Start RX Engine
     */
    jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                RXCS_QUEUESEL_Q0 |
                RXCS_ENABLE |
                RXCS_QST);
}

static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
    int i;
    u32 val;

    /*
     * Disable RX Engine
     */
    jwrite32(jme, JME_RXCS, jme->reg_rxcs);
    wmb();

    val = jread32(jme, JME_RXCS);
    for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
        mdelay(1);
        val = jread32(jme, JME_RXCS);
        rmb();
    }

    if (!i)
        pr_err("Disable RX engine timeout\n");

    /*
     * Stop clock for RX MAC Processor
     */
    jme_mac_rxclk_off(jme);
}

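/*
 * Return the UDP checksum field of an IPv4/UDP frame, or 0xFFFF if the
 * frame is not IPv4/UDP. A zero return value means the sender did not
 * compute a checksum, which lets jme_rxsum_ok() ignore hardware UDP
 * checksum errors for such frames.
 */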
static u16
jme_udpsum(struct sk_buff *skb)
{
    u16 csum = 0xFFFFu;

    if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
        return csum;
    if (skb->protocol != htons(ETH_P_IP))
        return csum;
    skb_set_network_header(skb, ETH_HLEN);
    if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
        (skb->len < (ETH_HLEN +
            (ip_hdr(skb)->ihl << 2) +
            sizeof(struct udphdr)))) {
        skb_reset_network_header(skb);
        return csum;
    }
    skb_set_transport_header(skb,
            ETH_HLEN + (ip_hdr(skb)->ihl << 2));
    csum = udp_hdr(skb)->check;
    skb_reset_transport_header(skb);
    skb_reset_network_header(skb);

    return csum;
}

static int
jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
    if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
        return false;

    if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
            == RXWBFLAG_TCPON)) {
        if (flags & RXWBFLAG_IPV4)
            netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
        return false;
    }

    if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
            == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
        if (flags & RXWBFLAG_IPV4)
            netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
        return false;
    }

    if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
            == RXWBFLAG_IPV4)) {
        netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
        return false;
    }

    return true;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    struct rxdesc *rxdesc = rxring->desc;
    struct jme_buffer_info *rxbi = rxring->bufinf;
    struct sk_buff *skb;
    int framesize;

    rxdesc += idx;
    rxbi += idx;

    skb = rxbi->skb;
    dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len,
                DMA_FROM_DEVICE);

    if (unlikely(jme_make_new_rx_buf(jme, idx))) {
        dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping,
                       rxbi->len, DMA_FROM_DEVICE);

        ++(NET_STAT(jme).rx_dropped);
    } else {
        framesize = le16_to_cpu(rxdesc->descwb.framesize)
                - RX_PREPAD_SIZE;

        skb_reserve(skb, RX_PREPAD_SIZE);
        skb_put(skb, framesize);
        skb->protocol = eth_type_trans(skb, jme->dev);

        if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
            skb_checksum_none_assert(skb);

        if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
            u16 vid = le16_to_cpu(rxdesc->descwb.vlan);

            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
            NET_STAT(jme).rx_bytes += 4;
        }
        jme->jme_rx(skb);

        if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
            cpu_to_le16(RXWBFLAG_DEST_MUL))
            ++(NET_STAT(jme).multicast);

        NET_STAT(jme).rx_bytes += framesize;
        ++(NET_STAT(jme).rx_packets);
    }

    jme_set_clean_rxdesc(jme, idx);

}

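/*
 * Service up to @limit received frames. Hardware hands a descriptor back
 * with RXWBFLAG_OWN cleared and RXWBDCNT_WBCPL set; frames spanning more
 * than one descriptor or carrying error status are dropped and their
 * descriptors recycled.
 */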
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
    struct jme_ring *rxring = &(jme->rxring[0]);
    struct rxdesc *rxdesc;
    int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

    if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
        goto out_inc;

    if (unlikely(atomic_read(&jme->link_changing) != 1))
        goto out_inc;

    if (unlikely(!netif_carrier_ok(jme->dev)))
        goto out_inc;

    i = atomic_read(&rxring->next_to_clean);
    while (limit > 0) {
        rxdesc = rxring->desc;
        rxdesc += i;

        if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
        !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
            goto out;
        --limit;

        rmb();
        desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

        if (unlikely(desccnt > 1 ||
        rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

            if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
                ++(NET_STAT(jme).rx_crc_errors);
            else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
                ++(NET_STAT(jme).rx_fifo_errors);
            else
                ++(NET_STAT(jme).rx_errors);

            if (desccnt > 1)
                limit -= desccnt - 1;

            for (j = i, ccnt = desccnt ; ccnt-- ; ) {
                jme_set_clean_rxdesc(jme, j);
                j = (j + 1) & (mask);
            }

        } else {
            jme_alloc_and_feed_skb(jme, i);
        }

        i = (i + desccnt) & (mask);
    }

out:
    atomic_set(&rxring->next_to_clean, i);

out_inc:
    atomic_inc(&jme->rx_cleaning);

    return limit > 0 ? limit : 0;

}

static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
    if (likely(atmp == dpi->cur)) {
        dpi->cnt = 0;
        return;
    }

    if (dpi->attempt == atmp) {
        ++(dpi->cnt);
    } else {
        dpi->attempt = atmp;
        dpi->cnt = 0;
    }

}

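/*
 * Pick an interrupt-coalescing level from recent traffic: a heavy byte
 * count favors PCC_P3 (most coalescing), a high packet or interrupt rate
 * favors PCC_P2, and light load falls back to PCC_P1. A new level is
 * committed only after several consecutive attempts agree (dpi->cnt > 5).
 */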
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
    register struct dynpcc_info *dpi = &(jme->dpi);

    if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
        jme_attempt_pcc(dpi, PCC_P3);
    else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
         dpi->intr_cnt > PCC_INTR_THRESHOLD)
        jme_attempt_pcc(dpi, PCC_P2);
    else
        jme_attempt_pcc(dpi, PCC_P1);

    if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
        if (dpi->attempt < dpi->cur)
            tasklet_schedule(&jme->rxclean_task);
        jme_set_rx_pcc(jme, dpi->attempt);
        dpi->cur = dpi->attempt;
        dpi->cnt = 0;
    }
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
    struct dynpcc_info *dpi = &(jme->dpi);
    dpi->last_bytes     = NET_STAT(jme).rx_bytes;
    dpi->last_pkts      = NET_STAT(jme).rx_packets;
    dpi->intr_cnt       = 0;
    jwrite32(jme, JME_TMCSR,
        TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
    jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_shutdown_nic(struct jme_adapter *jme)
{
    u32 phylink;

    phylink = jme_linkstat_from_phy(jme);

    if (!(phylink & PHY_LINK_UP)) {
        /*
         * Disable all interrupts before issuing the timer
         */
        jme_stop_irq(jme);
        jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
    }
}

static void
jme_pcc_tasklet(struct tasklet_struct *t)
{
    struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
    struct net_device *netdev = jme->dev;

    if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
        jme_shutdown_nic(jme);
        return;
    }

    if (unlikely(!netif_carrier_ok(netdev) ||
        (atomic_read(&jme->link_changing) != 1)
    )) {
        jme_stop_pcc_timer(jme);
        return;
    }

    if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
        jme_dynamic_pcc(jme);

    jme_start_pcc_timer(jme);
}

static inline void
jme_polling_mode(struct jme_adapter *jme)
{
    jme_set_rx_pcc(jme, PCC_OFF);
}

static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
    jme_set_rx_pcc(jme, PCC_P1);
}

static inline int
jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
{
    u32 apmc;
    apmc = jread32(jme, JME_APMC);
    return apmc & JME_APMC_PSEUDO_HP_EN;
}

static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
    u32 apmc;

    apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
    apmc &= ~JME_APMC_EPIEN_CTRL;
    if (!no_extplug) {
        jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
        wmb();
    }
    jwrite32f(jme, JME_APMC, apmc);

    jwrite32f(jme, JME_TIMER2, 0);
    set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
    jwrite32(jme, JME_TMCSR,
        TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}

static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
    u32 apmc;

    jwrite32f(jme, JME_TMCSR, 0);
    jwrite32f(jme, JME_TIMER2, 0);
    clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

    apmc = jread32(jme, JME_APMC);
    apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
    jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
    wmb();
    jwrite32f(jme, JME_APMC, apmc);
}

static void jme_link_change_work(struct work_struct *work)
{
    struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
    struct net_device *netdev = jme->dev;
    int rc;

    while (!atomic_dec_and_test(&jme->link_changing)) {
        atomic_inc(&jme->link_changing);
        netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
        while (atomic_read(&jme->link_changing) != 1)
            netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
    }

    if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
        goto out;

    jme->old_mtu = netdev->mtu;
    netif_stop_queue(netdev);
    if (jme_pseudo_hotplug_enabled(jme))
        jme_stop_shutdown_timer(jme);

    jme_stop_pcc_timer(jme);
    tasklet_disable(&jme->txclean_task);
    tasklet_disable(&jme->rxclean_task);
    tasklet_disable(&jme->rxempty_task);

    if (netif_carrier_ok(netdev)) {
        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);

        if (test_bit(JME_FLAG_POLL, &jme->flags))
            jme_polling_mode(jme);

        netif_carrier_off(netdev);
    }

    jme_check_link(netdev, 0);
    if (netif_carrier_ok(netdev)) {
        rc = jme_setup_rx_resources(jme);
        if (rc) {
            pr_err("Allocating resources for RX error, Device STOPPED!\n");
            goto out_enable_tasklet;
        }

        rc = jme_setup_tx_resources(jme);
        if (rc) {
            pr_err("Allocating resources for TX error, Device STOPPED!\n");
            goto err_out_free_rx_resources;
        }

        jme_enable_rx_engine(jme);
        jme_enable_tx_engine(jme);

        netif_start_queue(netdev);

        if (test_bit(JME_FLAG_POLL, &jme->flags))
            jme_interrupt_mode(jme);

        jme_start_pcc_timer(jme);
    } else if (jme_pseudo_hotplug_enabled(jme)) {
        jme_start_shutdown_timer(jme);
    }

    goto out_enable_tasklet;

err_out_free_rx_resources:
    jme_free_rx_resources(jme);
out_enable_tasklet:
    tasklet_enable(&jme->txclean_task);
    tasklet_enable(&jme->rxclean_task);
    tasklet_enable(&jme->rxempty_task);
out:
    atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(struct tasklet_struct *t)
{
    struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
    struct dynpcc_info *dpi = &(jme->dpi);

    jme_process_receive(jme, jme->rx_ring_size);
    ++(dpi->intr_cnt);

}

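/*
 * NAPI poll handler. RX-empty events observed while polling are counted
 * as drops and the RX engine is restarted; when the budget is not
 * exhausted, NAPI is completed and the device is switched back to
 * interrupt-driven coalescing.
 */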
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
    struct jme_adapter *jme = jme_napi_priv(holder);
    int rest;

    rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

    while (atomic_read(&jme->rx_empty) > 0) {
        atomic_dec(&jme->rx_empty);
        ++(NET_STAT(jme).rx_dropped);
        jme_restart_rx_engine(jme);
    }
    atomic_inc(&jme->rx_empty);

    if (rest) {
        JME_RX_COMPLETE(netdev, holder);
        jme_interrupt_mode(jme);
    }

    JME_NAPI_WEIGHT_SET(budget, rest);
    return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

static void
jme_rx_empty_tasklet(struct tasklet_struct *t)
{
    struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);

    if (unlikely(atomic_read(&jme->link_changing) != 1))
        return;

    if (unlikely(!netif_carrier_ok(jme->dev)))
        return;

    netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

    jme_rx_clean_tasklet(&jme->rxclean_task);

    while (atomic_read(&jme->rx_empty) > 0) {
        atomic_dec(&jme->rx_empty);
        ++(NET_STAT(jme).rx_dropped);
        jme_restart_rx_engine(jme);
    }
    atomic_inc(&jme->rx_empty);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
    struct jme_ring *txring = &(jme->txring[0]);

    smp_wmb();
    if (unlikely(netif_queue_stopped(jme->dev) &&
    atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
        netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
        netif_wake_queue(jme->dev);
    }

}

static void jme_tx_clean_tasklet(struct tasklet_struct *t)
{
    struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
    struct jme_ring *txring = &(jme->txring[0]);
    struct txdesc *txdesc = txring->desc;
    struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
    int i, j, cnt = 0, max, err, mask;

    tx_dbg(jme, "Into txclean\n");

    if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
        goto out;

    if (unlikely(atomic_read(&jme->link_changing) != 1))
        goto out;

    if (unlikely(!netif_carrier_ok(jme->dev)))
        goto out;

    max = jme->tx_ring_size - atomic_read(&txring->nr_free);
    mask = jme->tx_ring_mask;

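    /*
     * Walk completed descriptors: an entry is reclaimable once its skb
     * is set and hardware has cleared TXWBFLAG_OWN on the first
     * descriptor of the packet.
     */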
    for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

        ctxbi = txbi + i;

        if (likely(ctxbi->skb &&
        !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

            tx_dbg(jme, "txclean: %d+%d@%lu\n",
                   i, ctxbi->nr_desc, jiffies);

            err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

            for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
                ttxbi = txbi + ((i + j) & (mask));
                txdesc[(i + j) & (mask)].dw[0] = 0;

                dma_unmap_page(&jme->pdev->dev,
                           ttxbi->mapping, ttxbi->len,
                           DMA_TO_DEVICE);

                ttxbi->mapping = 0;
                ttxbi->len = 0;
            }

            dev_kfree_skb(ctxbi->skb);

            cnt += ctxbi->nr_desc;

            if (unlikely(err)) {
                ++(NET_STAT(jme).tx_carrier_errors);
            } else {
                ++(NET_STAT(jme).tx_packets);
                NET_STAT(jme).tx_bytes += ctxbi->len;
            }

            ctxbi->skb = NULL;
            ctxbi->len = 0;
            ctxbi->start_xmit = 0;

        } else {
            break;
        }

        i = (i + ctxbi->nr_desc) & mask;

        ctxbi->nr_desc = 0;
    }

    tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
    atomic_set(&txring->next_to_clean, i);
    atomic_add(cnt, &txring->nr_free);

    jme_wake_queue_if_stopped(jme);

out:
    atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
    /*
     * Disable interrupt
     */
    jwrite32f(jme, JME_IENC, INTR_ENABLE);

1501     if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1502         /*
1503          * Link change event is critical
1504          * all other events are ignored
1505          */
1506         jwrite32(jme, JME_IEVE, intrstat);
1507         schedule_work(&jme->linkch_task);
1508         goto out_reenable;
1509     }
1510 
1511     if (intrstat & INTR_TMINTR) {
1512         jwrite32(jme, JME_IEVE, INTR_TMINTR);
1513         tasklet_schedule(&jme->pcc_task);
1514     }
1515 
1516     if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1517         jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1518         tasklet_schedule(&jme->txclean_task);
1519     }
1520 
1521     if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1522         jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1523                              INTR_PCCRX0 |
1524                              INTR_RX0EMP)) |
1525                     INTR_RX0);
1526     }
1527 
1528     if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1529         if (intrstat & INTR_RX0EMP)
1530             atomic_inc(&jme->rx_empty);
1531 
1532         if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1533             if (likely(JME_RX_SCHEDULE_PREP(jme))) {
1534                 jme_polling_mode(jme);
1535                 JME_RX_SCHEDULE(jme);
1536             }
1537         }
1538     } else {
1539         if (intrstat & INTR_RX0EMP) {
1540             atomic_inc(&jme->rx_empty);
1541             tasklet_hi_schedule(&jme->rxempty_task);
1542         } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
1543             tasklet_hi_schedule(&jme->rxclean_task);
1544         }
1545     }
1546 
1547 out_reenable:
1548     /*
1549      * Re-enable interrupt
1550      */
1551     jwrite32f(jme, JME_IENS, INTR_ENABLE);
1552 }
1553 
1554 static irqreturn_t
1555 jme_intr(int irq, void *dev_id)
1556 {
1557     struct net_device *netdev = dev_id;
1558     struct jme_adapter *jme = netdev_priv(netdev);
1559     u32 intrstat;
1560 
1561     intrstat = jread32(jme, JME_IEVE);
1562 
1563     /*
1564      * Check if it's really an interrupt for us
1565      */
1566     if (unlikely((intrstat & INTR_ENABLE) == 0))
1567         return IRQ_NONE;
1568 
1569     /*
1570      * Check if the device still exist
1571      */
1572     if (unlikely(intrstat == ~((typeof(intrstat))0)))
1573         return IRQ_NONE;
1574 
1575     jme_intr_msi(jme, intrstat);
1576 
1577     return IRQ_HANDLED;
1578 }
1579 
1580 static irqreturn_t
1581 jme_msi(int irq, void *dev_id)
1582 {
1583     struct net_device *netdev = dev_id;
1584     struct jme_adapter *jme = netdev_priv(netdev);
1585     u32 intrstat;
1586 
1587     intrstat = jread32(jme, JME_IEVE);
1588 
1589     jme_intr_msi(jme, intrstat);
1590 
1591     return IRQ_HANDLED;
1592 }
1593 
1594 static void
1595 jme_reset_link(struct jme_adapter *jme)
1596 {
1597     jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1598 }
1599 
1600 static void
1601 jme_restart_an(struct jme_adapter *jme)
1602 {
1603     u32 bmcr;
1604 
1605     spin_lock_bh(&jme->phy_lock);
1606     bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1607     bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1608     jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1609     spin_unlock_bh(&jme->phy_lock);
1610 }
1611 
1612 static int
1613 jme_request_irq(struct jme_adapter *jme)
1614 {
1615     int rc;
1616     struct net_device *netdev = jme->dev;
1617     irq_handler_t handler = jme_intr;
1618     int irq_flags = IRQF_SHARED;
1619 
1620     if (!pci_enable_msi(jme->pdev)) {
1621         set_bit(JME_FLAG_MSI, &jme->flags);
1622         handler = jme_msi;
1623         irq_flags = 0;
1624     }
1625 
1626     rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1627               netdev);
1628     if (rc) {
1629         netdev_err(netdev,
1630                "Unable to request %s interrupt (return: %d)\n",
1631                test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1632                rc);
1633 
1634         if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1635             pci_disable_msi(jme->pdev);
1636             clear_bit(JME_FLAG_MSI, &jme->flags);
1637         }
1638     } else {
1639         netdev->irq = jme->pdev->irq;
1640     }
1641 
1642     return rc;
1643 }
1644 
1645 static void
1646 jme_free_irq(struct jme_adapter *jme)
1647 {
1648     free_irq(jme->pdev->irq, jme->dev);
1649     if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1650         pci_disable_msi(jme->pdev);
1651         clear_bit(JME_FLAG_MSI, &jme->flags);
1652         jme->dev->irq = jme->pdev->irq;
1653     }
1654 }
1655 
1656 static inline void
1657 jme_new_phy_on(struct jme_adapter *jme)
1658 {
1659     u32 reg;
1660 
1661     reg = jread32(jme, JME_PHY_PWR);
1662     reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1663          PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1664     jwrite32(jme, JME_PHY_PWR, reg);
1665 
1666     pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1667     reg &= ~PE1_GPREG0_PBG;
1668     reg |= PE1_GPREG0_ENBG;
1669     pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1670 }
1671 
1672 static inline void
1673 jme_new_phy_off(struct jme_adapter *jme)
1674 {
1675     u32 reg;
1676 
1677     reg = jread32(jme, JME_PHY_PWR);
1678     reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1679            PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1680     jwrite32(jme, JME_PHY_PWR, reg);
1681 
1682     pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1683     reg &= ~PE1_GPREG0_PBG;
1684     reg |= PE1_GPREG0_PDD3COLD;
1685     pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1686 }
1687 
1688 static inline void
1689 jme_phy_on(struct jme_adapter *jme)
1690 {
1691     u32 bmcr;
1692 
1693     bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1694     bmcr &= ~BMCR_PDOWN;
1695     jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1696 
1697     if (new_phy_power_ctrl(jme->chip_main_rev))
1698         jme_new_phy_on(jme);
1699 }
1700 
1701 static inline void
1702 jme_phy_off(struct jme_adapter *jme)
1703 {
1704     u32 bmcr;
1705 
1706     bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1707     bmcr |= BMCR_PDOWN;
1708     jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1709 
1710     if (new_phy_power_ctrl(jme->chip_main_rev))
1711         jme_new_phy_off(jme);
1712 }
1713 
1714 static int
1715 jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1716 {
1717     u32 phy_addr;
1718 
1719     phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1720     jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1721             phy_addr);
1722     return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1723             JM_PHY_SPEC_DATA_REG);
1724 }
1725 
1726 static void
1727 jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1728 {
1729     u32 phy_addr;
1730 
1731     phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1732     jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1733             phy_data);
1734     jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1735             phy_addr);
1736 }
1737 
1738 static int
1739 jme_phy_calibration(struct jme_adapter *jme)
1740 {
1741     u32 ctrl1000, phy_data;
1742 
1743     jme_phy_off(jme);
1744     jme_phy_on(jme);
1745     /*  Enabel PHY test mode 1 */
1746     ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1747     ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1748     ctrl1000 |= PHY_GAD_TEST_MODE_1;
1749     jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1750 
1751     phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1752     phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1753     phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1754             JM_PHY_EXT_COMM_2_CALI_ENABLE;
1755     jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1756     msleep(20);
1757     phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1758     phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1759             JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1760             JM_PHY_EXT_COMM_2_CALI_LATCH);
1761     jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1762 
1763     /*  Disable PHY test mode */
1764     ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1765     ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1766     jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1767     return 0;
1768 }
1769 
1770 static int
1771 jme_phy_setEA(struct jme_adapter *jme)
1772 {
1773     u32 phy_comm0 = 0, phy_comm1 = 0;
1774     u8 nic_ctrl;
1775 
1776     pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1777     if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1778         return 0;
1779 
1780     switch (jme->pdev->device) {
1781     case PCI_DEVICE_ID_JMICRON_JMC250:
1782         if (((jme->chip_main_rev == 5) &&
1783             ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1784             (jme->chip_sub_rev == 3))) ||
1785             (jme->chip_main_rev >= 6)) {
1786             phy_comm0 = 0x008A;
1787             phy_comm1 = 0x4109;
1788         }
1789         if ((jme->chip_main_rev == 3) &&
1790             ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1791             phy_comm0 = 0xE088;
1792         break;
1793     case PCI_DEVICE_ID_JMICRON_JMC260:
1794         if (((jme->chip_main_rev == 5) &&
1795             ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1796             (jme->chip_sub_rev == 3))) ||
1797             (jme->chip_main_rev >= 6)) {
1798             phy_comm0 = 0x008A;
1799             phy_comm1 = 0x4109;
1800         }
1801         if ((jme->chip_main_rev == 3) &&
1802             ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1803             phy_comm0 = 0xE088;
1804         if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1805             phy_comm0 = 0x608A;
1806         if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1807             phy_comm0 = 0x408A;
1808         break;
1809     default:
1810         return -ENODEV;
1811     }
1812     if (phy_comm0)
1813         jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1814     if (phy_comm1)
1815         jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1816 
1817     return 0;
1818 }
1819 
1820 static int
1821 jme_open(struct net_device *netdev)
1822 {
1823     struct jme_adapter *jme = netdev_priv(netdev);
1824     int rc;
1825 
1826     jme_clear_pm_disable_wol(jme);
1827     JME_NAPI_ENABLE(jme);
1828 
1829     tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
1830     tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
1831     tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
1832 
1833     rc = jme_request_irq(jme);
1834     if (rc)
1835         goto err_out;
1836 
1837     jme_start_irq(jme);
1838 
1839     jme_phy_on(jme);
1840     if (test_bit(JME_FLAG_SSET, &jme->flags))
1841         jme_set_link_ksettings(netdev, &jme->old_cmd);
1842     else
1843         jme_reset_phy_processor(jme);
1844     jme_phy_calibration(jme);
1845     jme_phy_setEA(jme);
1846     jme_reset_link(jme);
1847 
1848     return 0;
1849 
1850 err_out:
1851     netif_stop_queue(netdev);
1852     netif_carrier_off(netdev);
1853     return rc;
1854 }
1855 
1856 static void
1857 jme_set_100m_half(struct jme_adapter *jme)
1858 {
1859     u32 bmcr, tmp;
1860 
1861     jme_phy_on(jme);
1862     bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1863     tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1864                BMCR_SPEED1000 | BMCR_FULLDPLX);
1865     tmp |= BMCR_SPEED100;
1866 
1867     if (bmcr != tmp)
1868         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1869 
1870     if (jme->fpgaver)
1871         jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1872     else
1873         jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1874 }
1875 
1876 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1877 static void
1878 jme_wait_link(struct jme_adapter *jme)
1879 {
1880     u32 phylink, to = JME_WAIT_LINK_TIME;
1881 
1882     msleep(1000);
1883     phylink = jme_linkstat_from_phy(jme);
1884     while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1885         usleep_range(10000, 11000);
1886         phylink = jme_linkstat_from_phy(jme);
1887     }
1888 }
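/*
 * Editor's note (timing, inferred from the code above): jme_wait_link()
 * sleeps a fixed 1s, then polls the PHY link bit roughly every 10ms,
 * decrementing the 2000ms budget by 10 per iteration. Worst case it
 * blocks for about 3 seconds before giving up.
 */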
1889 
1890 static void
1891 jme_powersave_phy(struct jme_adapter *jme)
1892 {
1893     if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
1894         jme_set_100m_half(jme);
1895         if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1896             jme_wait_link(jme);
1897         jme_clear_pm_enable_wol(jme);
1898     } else {
1899         jme_phy_off(jme);
1900     }
1901 }
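/*
 * Editor's note (summary, not in the original source): on shutdown or
 * suspend with Wake-on-LAN armed (reg_pmcs set and wakeup allowed), the
 * link is forced down to 100M half duplex and, for the WAKE_PHY bits
 * (PMCS_LFEN | PMCS_LREN), the code waits for the link to come back
 * before enabling WoL; without WoL the PHY is simply powered off.
 */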
1902 
1903 static int
1904 jme_close(struct net_device *netdev)
1905 {
1906     struct jme_adapter *jme = netdev_priv(netdev);
1907 
1908     netif_stop_queue(netdev);
1909     netif_carrier_off(netdev);
1910 
1911     jme_stop_irq(jme);
1912     jme_free_irq(jme);
1913 
1914     JME_NAPI_DISABLE(jme);
1915 
1916     cancel_work_sync(&jme->linkch_task);
1917     tasklet_kill(&jme->txclean_task);
1918     tasklet_kill(&jme->rxclean_task);
1919     tasklet_kill(&jme->rxempty_task);
1920 
1921     jme_disable_rx_engine(jme);
1922     jme_disable_tx_engine(jme);
1923     jme_reset_mac_processor(jme);
1924     jme_free_rx_resources(jme);
1925     jme_free_tx_resources(jme);
1926     jme->phylink = 0;
1927     jme_phy_off(jme);
1928 
1929     return 0;
1930 }
1931 
1932 static int
1933 jme_alloc_txdesc(struct jme_adapter *jme,
1934             struct sk_buff *skb)
1935 {
1936     struct jme_ring *txring = &(jme->txring[0]);
1937     int idx, nr_alloc, mask = jme->tx_ring_mask;
1938 
1939     idx = txring->next_to_use;
1940     nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1941 
1942     if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1943         return -1;
1944 
1945     atomic_sub(nr_alloc, &txring->nr_free);
1946 
1947     txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1948 
1949     return idx;
1950 }
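/*
 * Editor's note (worked example, not in the original source): each skb
 * reserves nr_frags + 2 descriptors: one command descriptor for
 * flags/MSS/VLAN, one buffer descriptor for the linear data, and one
 * per page fragment. With the 1024-entry ring set up in jme_init_one()
 * (mask 0x3FF), an skb with 3 fragments at next_to_use == 1022 reserves
 * 5 slots and wraps: (1022 + 5) & 0x3FF == 3.
 */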
1951 
1952 static int
1953 jme_fill_tx_map(struct pci_dev *pdev,
1954         struct txdesc *txdesc,
1955         struct jme_buffer_info *txbi,
1956         struct page *page,
1957         u32 page_offset,
1958         u32 len,
1959         bool hidma)
1960 {
1961     dma_addr_t dmaaddr;
1962 
1963     dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len,
1964                    DMA_TO_DEVICE);
1965 
1966     if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr)))
1967         return -EINVAL;
1968 
1969     dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE);
1970 
1971     txdesc->dw[0] = 0;
1972     txdesc->dw[1] = 0;
1973     txdesc->desc2.flags = TXFLAG_OWN;
1974     txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
1975     txdesc->desc2.datalen   = cpu_to_le16(len);
1976     txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
1977     txdesc->desc2.bufaddrl  = cpu_to_le32(
1978                     (__u64)dmaaddr & 0xFFFFFFFFUL);
1979 
1980     txbi->mapping = dmaaddr;
1981     txbi->len = len;
1982     return 0;
1983 }
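/*
 * Editor's note (illustration, not in the original source): the DMA
 * address is split across two 32-bit descriptor words, e.g. for
 * dmaaddr == 0x123456780ULL:
 *
 *	bufaddrh = cpu_to_le32(0x00000001);	/- bits 63..32 -/
 *	bufaddrl = cpu_to_le32(0x23456780);	/- bits 31..0  -/
 *
 * TXFLAG_64BIT is set only when the netdev advertises NETIF_F_HIGHDMA,
 * i.e. when a >32-bit DMA mask was accepted (see jme_pci_dma64()).
 */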
1984 
1985 static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
1986 {
1987     struct jme_ring *txring = &(jme->txring[0]);
1988     struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
1989     int mask = jme->tx_ring_mask;
1990     int j;
1991 
1992     for (j = 0 ; j < count ; j++) {
1993         ctxbi = txbi + ((startidx + j + 2) & (mask));
1994         dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len,
1995                    DMA_TO_DEVICE);
1996 
1997         ctxbi->mapping = 0;
1998         ctxbi->len = 0;
1999     }
2000 }
2001 
2002 static int
2003 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2004 {
2005     struct jme_ring *txring = &(jme->txring[0]);
2006     struct txdesc *txdesc = txring->desc, *ctxdesc;
2007     struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2008     bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
2009     int i, nr_frags = skb_shinfo(skb)->nr_frags;
2010     int mask = jme->tx_ring_mask;
2011     u32 len;
2012     int ret = 0;
2013 
2014     for (i = 0 ; i < nr_frags ; ++i) {
2015         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2016 
2017         ctxdesc = txdesc + ((idx + i + 2) & (mask));
2018         ctxbi = txbi + ((idx + i + 2) & (mask));
2019 
2020         ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2021                       skb_frag_page(frag), skb_frag_off(frag),
2022                       skb_frag_size(frag), hidma);
2023         if (ret) {
2024             jme_drop_tx_map(jme, idx, i);
2025             goto out;
2026         }
2027     }
2028 
2029     len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2030     ctxdesc = txdesc + ((idx + 1) & (mask));
2031     ctxbi = txbi + ((idx + 1) & (mask));
2032     ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2033             offset_in_page(skb->data), len, hidma);
2034     if (ret)
2035         jme_drop_tx_map(jme, idx, i);
2036 
2037 out:
2038     return ret;
2039 
2040 }
2041 
2042 
2043 static int
2044 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2045 {
2046     *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
2047     if (*mss) {
2048         *flags |= TXFLAG_LSEN;
2049 
2050         if (skb->protocol == htons(ETH_P_IP)) {
2051             struct iphdr *iph = ip_hdr(skb);
2052 
2053             iph->check = 0;
2054             tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2055                                 iph->daddr, 0,
2056                                 IPPROTO_TCP,
2057                                 0);
2058         } else {
2059             tcp_v6_gso_csum_prep(skb);
2060         }
2061 
2062         return 0;
2063     }
2064 
2065     return 1;
2066 }
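/*
 * Editor's note (summary, not in the original source): for TSO the
 * stack's checksums are re-seeded so the hardware can finish them per
 * segment: the IPv4 header checksum is zeroed and the TCP checksum is
 * preloaded with the pseudo-header sum (length 0), while
 * tcp_v6_gso_csum_prep() does the equivalent for IPv6. The function
 * returns 0 when gso_size is set and 1 otherwise, which tells the
 * caller to fall back to plain checksum offload (jme_tx_csum()).
 */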
2067 
2068 static void
2069 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2070 {
2071     if (skb->ip_summed == CHECKSUM_PARTIAL) {
2072         u8 ip_proto;
2073 
2074         switch (skb->protocol) {
2075         case htons(ETH_P_IP):
2076             ip_proto = ip_hdr(skb)->protocol;
2077             break;
2078         case htons(ETH_P_IPV6):
2079             ip_proto = ipv6_hdr(skb)->nexthdr;
2080             break;
2081         default:
2082             ip_proto = 0;
2083             break;
2084         }
2085 
2086         switch (ip_proto) {
2087         case IPPROTO_TCP:
2088             *flags |= TXFLAG_TCPCS;
2089             break;
2090         case IPPROTO_UDP:
2091             *flags |= TXFLAG_UDPCS;
2092             break;
2093         default:
2094             netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n");
2095             break;
2096         }
2097     }
2098 }
2099 
2100 static inline void
2101 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2102 {
2103     if (skb_vlan_tag_present(skb)) {
2104         *flags |= TXFLAG_TAGON;
2105         *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2106     }
2107 }
2108 
2109 static int
2110 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2111 {
2112     struct jme_ring *txring = &(jme->txring[0]);
2113     struct txdesc *txdesc;
2114     struct jme_buffer_info *txbi;
2115     u8 flags;
2116     int ret = 0;
2117 
2118     txdesc = (struct txdesc *)txring->desc + idx;
2119     txbi = txring->bufinf + idx;
2120 
2121     txdesc->dw[0] = 0;
2122     txdesc->dw[1] = 0;
2123     txdesc->dw[2] = 0;
2124     txdesc->dw[3] = 0;
2125     txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2126     /*
2127      * Set the OWN bit last: the kernel may queue packets faster
2128      * than the NIC sends them, and the NIC must not pick up this
2129      * descriptor before we tell it to start sending on this TX
2130      * queue.
2131      * The other fields are already filled in correctly.
2132      */
2133     wmb();
2134     flags = TXFLAG_OWN | TXFLAG_INT;
2135     /*
2136      * Set checksum flags when not doing TSO
2137      */
2138     if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2139         jme_tx_csum(jme, skb, &flags);
2140     jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2141     ret = jme_map_tx_skb(jme, skb, idx);
2142     if (ret)
2143         return ret;
2144 
2145     txdesc->desc1.flags = flags;
2146     /*
2147      * Set the tx buffer info only after telling the NIC to send,
2148      * for better tx_clean timing
2149      */
2150     wmb();
2151     txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2152     txbi->skb = skb;
2153     txbi->len = skb->len;
2154     txbi->start_xmit = jiffies;
2155     if (!txbi->start_xmit)
2156         txbi->start_xmit = (0UL-1);
2157 
2158     return 0;
2159 }
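/*
 * Editor's note (ordering, inferred from the code above): the first
 * wmb() makes the initialized descriptor words visible before the OWN
 * flag hands the descriptor to the NIC; the second orders the OWN
 * write against the bufinf bookkeeping consumed by tx_clean.
 * start_xmit is forced non-zero (to ~0UL) because
 * jme_stop_queue_if_full() treats 0 as "slot unused".
 */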
2160 
2161 static void
2162 jme_stop_queue_if_full(struct jme_adapter *jme)
2163 {
2164     struct jme_ring *txring = &(jme->txring[0]);
2165     struct jme_buffer_info *txbi = txring->bufinf;
2166     int idx = atomic_read(&txring->next_to_clean);
2167 
2168     txbi += idx;
2169 
2170     smp_wmb();
2171     if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2172         netif_stop_queue(jme->dev);
2173         netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2174         smp_wmb();
2175         if (atomic_read(&txring->nr_free)
2176             >= (jme->tx_wake_threshold)) {
2177             netif_wake_queue(jme->dev);
2178             netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
2179         }
2180     }
2181 
2182     if (unlikely(txbi->start_xmit &&
2183             time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
2184             txbi->skb)) {
2185         netif_stop_queue(jme->dev);
2186         netif_info(jme, tx_queued, jme->dev,
2187                "TX Queue Stopped %d@%lu\n", idx, jiffies);
2188     }
2189 }
2190 
2191 /*
2192  * This function is already protected by netif_tx_lock()
2193  */
2194 
2195 static netdev_tx_t
2196 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2197 {
2198     struct jme_adapter *jme = netdev_priv(netdev);
2199     int idx;
2200 
2201     if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
2202         dev_kfree_skb_any(skb);
2203         ++(NET_STAT(jme).tx_dropped);
2204         return NETDEV_TX_OK;
2205     }
2206 
2207     idx = jme_alloc_txdesc(jme, skb);
2208 
2209     if (unlikely(idx < 0)) {
2210         netif_stop_queue(netdev);
2211         netif_err(jme, tx_err, jme->dev,
2212               "BUG! Tx ring full when queue awake!\n");
2213 
2214         return NETDEV_TX_BUSY;
2215     }
2216 
2217     if (jme_fill_tx_desc(jme, skb, idx))
2218         return NETDEV_TX_OK;
2219 
2220     jwrite32(jme, JME_TXCS, jme->reg_txcs |
2221                 TXCS_SELECT_QUEUE0 |
2222                 TXCS_QUEUE0S |
2223                 TXCS_ENABLE);
2224 
2225     tx_dbg(jme, "xmit: %d+%d@%lu\n",
2226            idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2227     jme_stop_queue_if_full(jme);
2228 
2229     return NETDEV_TX_OK;
2230 }
2231 
2232 static void
2233 jme_set_unicastaddr(struct net_device *netdev)
2234 {
2235     struct jme_adapter *jme = netdev_priv(netdev);
2236     u32 val;
2237 
2238     val = (netdev->dev_addr[3] & 0xff) << 24 |
2239           (netdev->dev_addr[2] & 0xff) << 16 |
2240           (netdev->dev_addr[1] & 0xff) <<  8 |
2241           (netdev->dev_addr[0] & 0xff);
2242     jwrite32(jme, JME_RXUMA_LO, val);
2243     val = (netdev->dev_addr[5] & 0xff) << 8 |
2244           (netdev->dev_addr[4] & 0xff);
2245     jwrite32(jme, JME_RXUMA_HI, val);
2246 }
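/*
 * Editor's note (worked example, not in the original source): the MAC
 * address is packed little-endian into two registers. For
 * dev_addr == 00:11:22:33:44:55 the writes above produce:
 *
 *	JME_RXUMA_LO = 0x33221100	(bytes 3..0)
 *	JME_RXUMA_HI = 0x00005544	(bytes 5..4)
 */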
2247 
2248 static int
2249 jme_set_macaddr(struct net_device *netdev, void *p)
2250 {
2251     struct jme_adapter *jme = netdev_priv(netdev);
2252     struct sockaddr *addr = p;
2253 
2254     if (netif_running(netdev))
2255         return -EBUSY;
2256 
2257     spin_lock_bh(&jme->macaddr_lock);
2258     eth_hw_addr_set(netdev, addr->sa_data);
2259     jme_set_unicastaddr(netdev);
2260     spin_unlock_bh(&jme->macaddr_lock);
2261 
2262     return 0;
2263 }
2264 
2265 static void
2266 jme_set_multi(struct net_device *netdev)
2267 {
2268     struct jme_adapter *jme = netdev_priv(netdev);
2269     u32 mc_hash[2] = {};
2270 
2271     spin_lock_bh(&jme->rxmcs_lock);
2272 
2273     jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2274 
2275     if (netdev->flags & IFF_PROMISC) {
2276         jme->reg_rxmcs |= RXMCS_ALLFRAME;
2277     } else if (netdev->flags & IFF_ALLMULTI) {
2278         jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2279     } else if (netdev->flags & IFF_MULTICAST) {
2280         struct netdev_hw_addr *ha;
2281         int bit_nr;
2282 
2283         jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2284         netdev_for_each_mc_addr(ha, netdev) {
2285             bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2286             mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2287         }
2288 
2289         jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2290         jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
2291     }
2292 
2293     wmb();
2294     jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2295 
2296     spin_unlock_bh(&jme->rxmcs_lock);
2297 }
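/*
 * Editor's note (illustration, not part of the original driver): the
 * multicast filter above is a 64-bit hash indexed by the low 6 bits of
 * the Ethernet CRC. A standalone sketch of the same index computation,
 * assuming ether_crc() from <linux/crc32.h>:
 */
#if 0	/* illustrative only */
static void jme_mc_hash_set(u32 mc_hash[2], const u8 *addr)
{
	/* Low 6 bits of the CRC select one of 64 hash bits. */
	int bit_nr = ether_crc(ETH_ALEN, addr) & 0x3F;

	/* e.g. bit_nr == 0x2A sets bit 10 of word 1. */
	mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
#endif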
2298 
2299 static int
2300 jme_change_mtu(struct net_device *netdev, int new_mtu)
2301 {
2302     struct jme_adapter *jme = netdev_priv(netdev);
2303 
2304     netdev->mtu = new_mtu;
2305     netdev_update_features(netdev);
2306 
2307     jme_restart_rx_engine(jme);
2308     jme_reset_link(jme);
2309 
2310     return 0;
2311 }
2312 
2313 static void
2314 jme_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2315 {
2316     struct jme_adapter *jme = netdev_priv(netdev);
2317 
2318     jme->phylink = 0;
2319     jme_reset_phy_processor(jme);
2320     if (test_bit(JME_FLAG_SSET, &jme->flags))
2321         jme_set_link_ksettings(netdev, &jme->old_cmd);
2322 
2323     /*
2324      * Force the link to reset again
2325      */
2326     jme_reset_link(jme);
2327 }
2328 
2329 static void
2330 jme_get_drvinfo(struct net_device *netdev,
2331              struct ethtool_drvinfo *info)
2332 {
2333     struct jme_adapter *jme = netdev_priv(netdev);
2334 
2335     strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2336     strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2337     strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
2338 }
2339 
2340 static int
2341 jme_get_regs_len(struct net_device *netdev)
2342 {
2343     return JME_REG_LEN;
2344 }
2345 
2346 static void
2347 mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2348 {
2349     int i;
2350 
2351     for (i = 0 ; i < len ; i += 4)
2352         p[i >> 2] = jread32(jme, reg + i);
2353 }
2354 
2355 static void
2356 mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2357 {
2358     int i;
2359     u16 *p16 = (u16 *)p;
2360 
2361     for (i = 0 ; i < reg_nr ; ++i)
2362         p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2363 }
2364 
2365 static void
2366 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2367 {
2368     struct jme_adapter *jme = netdev_priv(netdev);
2369     u32 *p32 = (u32 *)p;
2370 
2371     memset(p, 0xFF, JME_REG_LEN);
2372 
2373     regs->version = 1;
2374     mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2375 
2376     p32 += 0x100 >> 2;
2377     mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2378 
2379     p32 += 0x100 >> 2;
2380     mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2381 
2382     p32 += 0x100 >> 2;
2383     mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2384 
2385     p32 += 0x100 >> 2;
2386     mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2387 }
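/*
 * Editor's note (layout, inferred from the code above): the dump buffer
 * is laid out in 0x100-byte blocks; each "p32 += 0x100 >> 2" advances
 * one block:
 *
 *	0x000: MAC registers	0x100: PHY (MMIO) registers
 *	0x200: MISC registers	0x300: RSS registers
 *	0x400: JME_PHY_REG_NR 16-bit MDIO registers
 */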
2388 
2389 static int jme_get_coalesce(struct net_device *netdev,
2390                 struct ethtool_coalesce *ecmd,
2391                 struct kernel_ethtool_coalesce *kernel_coal,
2392                 struct netlink_ext_ack *extack)
2393 {
2394     struct jme_adapter *jme = netdev_priv(netdev);
2395 
2396     ecmd->tx_coalesce_usecs = PCC_TX_TO;
2397     ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2398 
2399     if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2400         ecmd->use_adaptive_rx_coalesce = false;
2401         ecmd->rx_coalesce_usecs = 0;
2402         ecmd->rx_max_coalesced_frames = 0;
2403         return 0;
2404     }
2405 
2406     ecmd->use_adaptive_rx_coalesce = true;
2407 
2408     switch (jme->dpi.cur) {
2409     case PCC_P1:
2410         ecmd->rx_coalesce_usecs = PCC_P1_TO;
2411         ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2412         break;
2413     case PCC_P2:
2414         ecmd->rx_coalesce_usecs = PCC_P2_TO;
2415         ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2416         break;
2417     case PCC_P3:
2418         ecmd->rx_coalesce_usecs = PCC_P3_TO;
2419         ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2420         break;
2421     default:
2422         break;
2423     }
2424 
2425     return 0;
2426 }
2427 
2428 static int jme_set_coalesce(struct net_device *netdev,
2429                 struct ethtool_coalesce *ecmd,
2430                 struct kernel_ethtool_coalesce *kernel_coal,
2431                 struct netlink_ext_ack *extack)
2432 {
2433     struct jme_adapter *jme = netdev_priv(netdev);
2434     struct dynpcc_info *dpi = &(jme->dpi);
2435 
2436     if (netif_running(netdev))
2437         return -EBUSY;
2438 
2439     if (ecmd->use_adaptive_rx_coalesce &&
2440         test_bit(JME_FLAG_POLL, &jme->flags)) {
2441         clear_bit(JME_FLAG_POLL, &jme->flags);
2442         jme->jme_rx = netif_rx;
2443         dpi->cur        = PCC_P1;
2444         dpi->attempt        = PCC_P1;
2445         dpi->cnt        = 0;
2446         jme_set_rx_pcc(jme, PCC_P1);
2447         jme_interrupt_mode(jme);
2448     } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2449            !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2450         set_bit(JME_FLAG_POLL, &jme->flags);
2451         jme->jme_rx = netif_receive_skb;
2452         jme_interrupt_mode(jme);
2453     }
2454 
2455     return 0;
2456 }
2457 
2458 static void
2459 jme_get_pauseparam(struct net_device *netdev,
2460             struct ethtool_pauseparam *ecmd)
2461 {
2462     struct jme_adapter *jme = netdev_priv(netdev);
2463     u32 val;
2464 
2465     ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2466     ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2467 
2468     spin_lock_bh(&jme->phy_lock);
2469     val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2470     spin_unlock_bh(&jme->phy_lock);
2471 
2472     ecmd->autoneg =
2473         (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2474 }
2475 
2476 static int
2477 jme_set_pauseparam(struct net_device *netdev,
2478             struct ethtool_pauseparam *ecmd)
2479 {
2480     struct jme_adapter *jme = netdev_priv(netdev);
2481     u32 val;
2482 
2483     if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2484         (ecmd->tx_pause != 0)) {
2485 
2486         if (ecmd->tx_pause)
2487             jme->reg_txpfc |= TXPFC_PF_EN;
2488         else
2489             jme->reg_txpfc &= ~TXPFC_PF_EN;
2490 
2491         jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2492     }
2493 
2494     spin_lock_bh(&jme->rxmcs_lock);
2495     if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2496         (ecmd->rx_pause != 0)) {
2497 
2498         if (ecmd->rx_pause)
2499             jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2500         else
2501             jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2502 
2503         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2504     }
2505     spin_unlock_bh(&jme->rxmcs_lock);
2506 
2507     spin_lock_bh(&jme->phy_lock);
2508     val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2509     if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2510         (ecmd->autoneg != 0)) {
2511 
2512         if (ecmd->autoneg)
2513             val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2514         else
2515             val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2516 
2517         jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2518                 MII_ADVERTISE, val);
2519     }
2520     spin_unlock_bh(&jme->phy_lock);
2521 
2522     return 0;
2523 }
2524 
2525 static void
2526 jme_get_wol(struct net_device *netdev,
2527         struct ethtool_wolinfo *wol)
2528 {
2529     struct jme_adapter *jme = netdev_priv(netdev);
2530 
2531     wol->supported = WAKE_MAGIC | WAKE_PHY;
2532 
2533     wol->wolopts = 0;
2534 
2535     if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2536         wol->wolopts |= WAKE_PHY;
2537 
2538     if (jme->reg_pmcs & PMCS_MFEN)
2539         wol->wolopts |= WAKE_MAGIC;
2540 
2541 }
2542 
2543 static int
2544 jme_set_wol(struct net_device *netdev,
2545         struct ethtool_wolinfo *wol)
2546 {
2547     struct jme_adapter *jme = netdev_priv(netdev);
2548 
2549     if (wol->wolopts & (WAKE_MAGICSECURE |
2550                 WAKE_UCAST |
2551                 WAKE_MCAST |
2552                 WAKE_BCAST |
2553                 WAKE_ARP))
2554         return -EOPNOTSUPP;
2555 
2556     jme->reg_pmcs = 0;
2557 
2558     if (wol->wolopts & WAKE_PHY)
2559         jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2560 
2561     if (wol->wolopts & WAKE_MAGIC)
2562         jme->reg_pmcs |= PMCS_MFEN;
2563 
2564     return 0;
2565 }
2566 
2567 static int
2568 jme_get_link_ksettings(struct net_device *netdev,
2569                struct ethtool_link_ksettings *cmd)
2570 {
2571     struct jme_adapter *jme = netdev_priv(netdev);
2572 
2573     spin_lock_bh(&jme->phy_lock);
2574     mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
2575     spin_unlock_bh(&jme->phy_lock);
2576     return 0;
2577 }
2578 
2579 static int
2580 jme_set_link_ksettings(struct net_device *netdev,
2581                const struct ethtool_link_ksettings *cmd)
2582 {
2583     struct jme_adapter *jme = netdev_priv(netdev);
2584     int rc, fdc = 0;
2585 
2586     if (cmd->base.speed == SPEED_1000 &&
2587         cmd->base.autoneg != AUTONEG_ENABLE)
2588         return -EINVAL;
2589 
2590     /*
2591      * Check if the user changed only the duplex while force_media
2592      * is set; the hardware would not generate a link change interrupt.
2593      */
2594     if (jme->mii_if.force_media &&
2595         cmd->base.autoneg != AUTONEG_ENABLE &&
2596         (jme->mii_if.full_duplex != cmd->base.duplex))
2597         fdc = 1;
2598 
2599     spin_lock_bh(&jme->phy_lock);
2600     rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
2601     spin_unlock_bh(&jme->phy_lock);
2602 
2603     if (!rc) {
2604         if (fdc)
2605             jme_reset_link(jme);
2606         jme->old_cmd = *cmd;
2607         set_bit(JME_FLAG_SSET, &jme->flags);
2608     }
2609 
2610     return rc;
2611 }
2612 
2613 static int
2614 jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2615 {
2616     int rc;
2617     struct jme_adapter *jme = netdev_priv(netdev);
2618     struct mii_ioctl_data *mii_data = if_mii(rq);
2619     unsigned int duplex_chg;
2620 
2621     if (cmd == SIOCSMIIREG) {
2622         u16 val = mii_data->val_in;
2623         if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2624             (val & BMCR_SPEED1000))
2625             return -EINVAL;
2626     }
2627 
2628     spin_lock_bh(&jme->phy_lock);
2629     rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2630     spin_unlock_bh(&jme->phy_lock);
2631 
2632     if (!rc && (cmd == SIOCSMIIREG)) {
2633         if (duplex_chg)
2634             jme_reset_link(jme);
2635         jme_get_link_ksettings(netdev, &jme->old_cmd);
2636         set_bit(JME_FLAG_SSET, &jme->flags);
2637     }
2638 
2639     return rc;
2640 }
2641 
2642 static u32
2643 jme_get_link(struct net_device *netdev)
2644 {
2645     struct jme_adapter *jme = netdev_priv(netdev);
2646     return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2647 }
2648 
2649 static u32
2650 jme_get_msglevel(struct net_device *netdev)
2651 {
2652     struct jme_adapter *jme = netdev_priv(netdev);
2653     return jme->msg_enable;
2654 }
2655 
2656 static void
2657 jme_set_msglevel(struct net_device *netdev, u32 value)
2658 {
2659     struct jme_adapter *jme = netdev_priv(netdev);
2660     jme->msg_enable = value;
2661 }
2662 
2663 static netdev_features_t
2664 jme_fix_features(struct net_device *netdev, netdev_features_t features)
2665 {
2666     if (netdev->mtu > 1900)
2667         features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK);
2668     return features;
2669 }
2670 
2671 static int
2672 jme_set_features(struct net_device *netdev, netdev_features_t features)
2673 {
2674     struct jme_adapter *jme = netdev_priv(netdev);
2675 
2676     spin_lock_bh(&jme->rxmcs_lock);
2677     if (features & NETIF_F_RXCSUM)
2678         jme->reg_rxmcs |= RXMCS_CHECKSUM;
2679     else
2680         jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2681     jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2682     spin_unlock_bh(&jme->rxmcs_lock);
2683 
2684     return 0;
2685 }
2686 
2687 #ifdef CONFIG_NET_POLL_CONTROLLER
2688 static void jme_netpoll(struct net_device *dev)
2689 {
2690     unsigned long flags;
2691 
2692     local_irq_save(flags);
2693     jme_intr(dev->irq, dev);
2694     local_irq_restore(flags);
2695 }
2696 #endif
2697 
2698 static int
2699 jme_nway_reset(struct net_device *netdev)
2700 {
2701     struct jme_adapter *jme = netdev_priv(netdev);
2702     jme_restart_an(jme);
2703     return 0;
2704 }
2705 
2706 static u8
2707 jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2708 {
2709     u32 val;
2710     int to;
2711 
2712     val = jread32(jme, JME_SMBCSR);
2713     to = JME_SMB_BUSY_TIMEOUT;
2714     while ((val & SMBCSR_BUSY) && --to) {
2715         msleep(1);
2716         val = jread32(jme, JME_SMBCSR);
2717     }
2718     if (!to) {
2719         netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2720         return 0xFF;
2721     }
2722 
2723     jwrite32(jme, JME_SMBINTF,
2724         ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2725         SMBINTF_HWRWN_READ |
2726         SMBINTF_HWCMD);
2727 
2728     val = jread32(jme, JME_SMBINTF);
2729     to = JME_SMB_BUSY_TIMEOUT;
2730     while ((val & SMBINTF_HWCMD) && --to) {
2731         msleep(1);
2732         val = jread32(jme, JME_SMBINTF);
2733     }
2734     if (!to) {
2735         netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2736         return 0xFF;
2737     }
2738 
2739     return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2740 }
2741 
2742 static void
2743 jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2744 {
2745     u32 val;
2746     int to;
2747 
2748     val = jread32(jme, JME_SMBCSR);
2749     to = JME_SMB_BUSY_TIMEOUT;
2750     while ((val & SMBCSR_BUSY) && --to) {
2751         msleep(1);
2752         val = jread32(jme, JME_SMBCSR);
2753     }
2754     if (!to) {
2755         netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2756         return;
2757     }
2758 
2759     jwrite32(jme, JME_SMBINTF,
2760         ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2761         ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2762         SMBINTF_HWRWN_WRITE |
2763         SMBINTF_HWCMD);
2764 
2765     val = jread32(jme, JME_SMBINTF);
2766     to = JME_SMB_BUSY_TIMEOUT;
2767     while ((val & SMBINTF_HWCMD) && --to) {
2768         msleep(1);
2769         val = jread32(jme, JME_SMBINTF);
2770     }
2771     if (!to) {
2772         netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2773         return;
2774     }
2775 
2776     mdelay(2);
2777 }
2778 
2779 static int
2780 jme_get_eeprom_len(struct net_device *netdev)
2781 {
2782     struct jme_adapter *jme = netdev_priv(netdev);
2783     u32 val;
2784     val = jread32(jme, JME_SMBCSR);
2785     return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2786 }
2787 
2788 static int
2789 jme_get_eeprom(struct net_device *netdev,
2790         struct ethtool_eeprom *eeprom, u8 *data)
2791 {
2792     struct jme_adapter *jme = netdev_priv(netdev);
2793     int i, offset = eeprom->offset, len = eeprom->len;
2794 
2795     /*
2796      * ethtool will check the boundary for us
2797      */
2798     eeprom->magic = JME_EEPROM_MAGIC;
2799     for (i = 0 ; i < len ; ++i)
2800         data[i] = jme_smb_read(jme, i + offset);
2801 
2802     return 0;
2803 }
2804 
2805 static int
2806 jme_set_eeprom(struct net_device *netdev,
2807         struct ethtool_eeprom *eeprom, u8 *data)
2808 {
2809     struct jme_adapter *jme = netdev_priv(netdev);
2810     int i, offset = eeprom->offset, len = eeprom->len;
2811 
2812     if (eeprom->magic != JME_EEPROM_MAGIC)
2813         return -EINVAL;
2814 
2815     /*
2816      * ethtool will check the boundary for us
2817      */
2818     for (i = 0 ; i < len ; ++i)
2819         jme_smb_write(jme, i + offset, data[i]);
2820 
2821     return 0;
2822 }
2823 
2824 static const struct ethtool_ops jme_ethtool_ops = {
2825     .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2826                      ETHTOOL_COALESCE_MAX_FRAMES |
2827                      ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2828     .get_drvinfo            = jme_get_drvinfo,
2829     .get_regs_len       = jme_get_regs_len,
2830     .get_regs       = jme_get_regs,
2831     .get_coalesce       = jme_get_coalesce,
2832     .set_coalesce       = jme_set_coalesce,
2833     .get_pauseparam     = jme_get_pauseparam,
2834     .set_pauseparam     = jme_set_pauseparam,
2835     .get_wol        = jme_get_wol,
2836     .set_wol        = jme_set_wol,
2837     .get_link       = jme_get_link,
2838     .get_msglevel           = jme_get_msglevel,
2839     .set_msglevel           = jme_set_msglevel,
2840     .nway_reset             = jme_nway_reset,
2841     .get_eeprom_len     = jme_get_eeprom_len,
2842     .get_eeprom     = jme_get_eeprom,
2843     .set_eeprom     = jme_set_eeprom,
2844     .get_link_ksettings = jme_get_link_ksettings,
2845     .set_link_ksettings = jme_set_link_ksettings,
2846 };
2847 
2848 static int
2849 jme_pci_dma64(struct pci_dev *pdev)
2850 {
2851     if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2852         !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
2853         return 1;
2854 
2855     if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2856         !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
2857         return 1;
2858 
2859     if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2860         return 0;
2861 
2862     return -1;
2863 }
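/*
 * Editor's note (summary, not in the original source): return values
 * are 1 if a wide (64- or 40-bit, JMC250 only) DMA mask was accepted,
 * in which case the probe path enables NETIF_F_HIGHDMA; 0 if only the
 * 32-bit mask stuck; and -1 if no mask could be set, which aborts
 * probing with -EIO.
 */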
2864 
2865 static inline void
2866 jme_phy_init(struct jme_adapter *jme)
2867 {
2868     u16 reg26;
2869 
2870     reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2871     jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2872 }
2873 
2874 static inline void
2875 jme_check_hw_ver(struct jme_adapter *jme)
2876 {
2877     u32 chipmode;
2878 
2879     chipmode = jread32(jme, JME_CHIPMODE);
2880 
2881     jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2882     jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2883     jme->chip_main_rev = jme->chiprev & 0xF;
2884     jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2885 }
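/*
 * Editor's note (worked example, not in the original source): the chip
 * revision byte splits into nibbles, low nibble = main revision, high
 * nibble = sub revision; e.g. chiprev == 0x23 gives chip_main_rev == 3
 * and chip_sub_rev == 2.
 */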
2886 
2887 static const struct net_device_ops jme_netdev_ops = {
2888     .ndo_open       = jme_open,
2889     .ndo_stop       = jme_close,
2890     .ndo_validate_addr  = eth_validate_addr,
2891     .ndo_eth_ioctl      = jme_ioctl,
2892     .ndo_start_xmit     = jme_start_xmit,
2893     .ndo_set_mac_address    = jme_set_macaddr,
2894     .ndo_set_rx_mode    = jme_set_multi,
2895     .ndo_change_mtu     = jme_change_mtu,
2896     .ndo_tx_timeout     = jme_tx_timeout,
2897     .ndo_fix_features       = jme_fix_features,
2898     .ndo_set_features       = jme_set_features,
2899 #ifdef CONFIG_NET_POLL_CONTROLLER
2900     .ndo_poll_controller    = jme_netpoll,
2901 #endif
2902 };
2903 
2904 static int
2905 jme_init_one(struct pci_dev *pdev,
2906          const struct pci_device_id *ent)
2907 {
2908     int rc = 0, using_dac, i;
2909     struct net_device *netdev;
2910     struct jme_adapter *jme;
2911     u16 bmcr, bmsr;
2912     u32 apmc;
2913 
2914     /*
2915      * set up PCI device basics
2916      */
2917     pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2918                    PCIE_LINK_STATE_CLKPM);
2919 
2920     rc = pci_enable_device(pdev);
2921     if (rc) {
2922         pr_err("Cannot enable PCI device\n");
2923         goto err_out;
2924     }
2925 
2926     using_dac = jme_pci_dma64(pdev);
2927     if (using_dac < 0) {
2928         pr_err("Cannot set PCI DMA Mask\n");
2929         rc = -EIO;
2930         goto err_out_disable_pdev;
2931     }
2932 
2933     if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2934         pr_err("No PCI resource region found\n");
2935         rc = -ENOMEM;
2936         goto err_out_disable_pdev;
2937     }
2938 
2939     rc = pci_request_regions(pdev, DRV_NAME);
2940     if (rc) {
2941         pr_err("Cannot obtain PCI resource region\n");
2942         goto err_out_disable_pdev;
2943     }
2944 
2945     pci_set_master(pdev);
2946 
2947     /*
2948      * alloc and init net device
2949      */
2950     netdev = alloc_etherdev(sizeof(*jme));
2951     if (!netdev) {
2952         rc = -ENOMEM;
2953         goto err_out_release_regions;
2954     }
2955     netdev->netdev_ops = &jme_netdev_ops;
2956     netdev->ethtool_ops     = &jme_ethtool_ops;
2957     netdev->watchdog_timeo      = TX_TIMEOUT;
2958     netdev->hw_features     =   NETIF_F_IP_CSUM |
2959                         NETIF_F_IPV6_CSUM |
2960                         NETIF_F_SG |
2961                         NETIF_F_TSO |
2962                         NETIF_F_TSO6 |
2963                         NETIF_F_RXCSUM;
2964     netdev->features        =   NETIF_F_IP_CSUM |
2965                         NETIF_F_IPV6_CSUM |
2966                         NETIF_F_SG |
2967                         NETIF_F_TSO |
2968                         NETIF_F_TSO6 |
2969                         NETIF_F_HW_VLAN_CTAG_TX |
2970                         NETIF_F_HW_VLAN_CTAG_RX;
2971     if (using_dac)
2972         netdev->features    |=  NETIF_F_HIGHDMA;
2973 
2974     /* MTU range: 1280 - 9202 */
2975     netdev->min_mtu = IPV6_MIN_MTU;
2976     netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;
2977 
2978     SET_NETDEV_DEV(netdev, &pdev->dev);
2979     pci_set_drvdata(pdev, netdev);
2980 
2981     /*
2982      * init adapter info
2983      */
2984     jme = netdev_priv(netdev);
2985     jme->pdev = pdev;
2986     jme->dev = netdev;
2987     jme->jme_rx = netif_rx;
2988     jme->old_mtu = netdev->mtu = 1500;
2989     jme->phylink = 0;
2990     jme->tx_ring_size = 1 << 10;
2991     jme->tx_ring_mask = jme->tx_ring_size - 1;
2992     jme->tx_wake_threshold = 1 << 9;
2993     jme->rx_ring_size = 1 << 9;
2994     jme->rx_ring_mask = jme->rx_ring_size - 1;
2995     jme->msg_enable = JME_DEF_MSG_ENABLE;
2996     jme->regs = ioremap(pci_resource_start(pdev, 0),
2997                  pci_resource_len(pdev, 0));
2998     if (!(jme->regs)) {
2999         pr_err("Mapping PCI resource region error\n");
3000         rc = -ENOMEM;
3001         goto err_out_free_netdev;
3002     }
3003 
3004     if (no_pseudohp) {
3005         apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
3006         jwrite32(jme, JME_APMC, apmc);
3007     } else if (force_pseudohp) {
3008         apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
3009         jwrite32(jme, JME_APMC, apmc);
3010     }
3011 
3012     netif_napi_add(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT);
3013 
3014     spin_lock_init(&jme->phy_lock);
3015     spin_lock_init(&jme->macaddr_lock);
3016     spin_lock_init(&jme->rxmcs_lock);
3017 
3018     atomic_set(&jme->link_changing, 1);
3019     atomic_set(&jme->rx_cleaning, 1);
3020     atomic_set(&jme->tx_cleaning, 1);
3021     atomic_set(&jme->rx_empty, 1);
3022 
3023     tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
3024     INIT_WORK(&jme->linkch_task, jme_link_change_work);
3025     jme->dpi.cur = PCC_P1;
3026 
3027     jme->reg_ghc = 0;
3028     jme->reg_rxcs = RXCS_DEFAULT;
3029     jme->reg_rxmcs = RXMCS_DEFAULT;
3030     jme->reg_txpfc = 0;
3031     jme->reg_pmcs = PMCS_MFEN;
3032     jme->reg_gpreg1 = GPREG1_DEFAULT;
3033 
3034     if (jme->reg_rxmcs & RXMCS_CHECKSUM)
3035         netdev->features |= NETIF_F_RXCSUM;
3036 
3037     /*
3038      * Get Max Read Req Size from PCI Config Space
3039      */
3040     pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
3041     jme->mrrs &= PCI_DCSR_MRRS_MASK;
3042     switch (jme->mrrs) {
3043     case MRRS_128B:
3044         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
3045         break;
3046     case MRRS_256B:
3047         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
3048         break;
3049     default:
3050         jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
3051         break;
3052     }
3053 
3054     /*
3055      * Must check before reset_mac_processor
3056      */
3057     jme_check_hw_ver(jme);
3058     jme->mii_if.dev = netdev;
3059     if (jme->fpgaver) {
3060         jme->mii_if.phy_id = 0;
3061         for (i = 1 ; i < 32 ; ++i) {
3062             bmcr = jme_mdio_read(netdev, i, MII_BMCR);
3063             bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3064             if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3065                 jme->mii_if.phy_id = i;
3066                 break;
3067             }
3068         }
3069 
3070         if (!jme->mii_if.phy_id) {
3071             rc = -EIO;
3072             pr_err("Can not find phy_id\n");
3073             goto err_out_unmap;
3074         }
3075 
3076         jme->reg_ghc |= GHC_LINK_POLL;
3077     } else {
3078         jme->mii_if.phy_id = 1;
3079     }
3080     if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
3081         jme->mii_if.supports_gmii = true;
3082     else
3083         jme->mii_if.supports_gmii = false;
3084     jme->mii_if.phy_id_mask = 0x1F;
3085     jme->mii_if.reg_num_mask = 0x1F;
3086     jme->mii_if.mdio_read = jme_mdio_read;
3087     jme->mii_if.mdio_write = jme_mdio_write;
3088 
3089     jme_clear_pm_disable_wol(jme);
3090     device_init_wakeup(&pdev->dev, true);
3091 
3092     jme_set_phyfifo_5level(jme);
3093     jme->pcirev = pdev->revision;
3094     if (!jme->fpgaver)
3095         jme_phy_init(jme);
3096     jme_phy_off(jme);
3097 
3098     /*
3099      * Reset MAC processor and reload EEPROM for MAC Address
3100      */
3101     jme_reset_mac_processor(jme);
3102     rc = jme_reload_eeprom(jme);
3103     if (rc) {
3104         pr_err("Reload eeprom for reading MAC Address error\n");
3105         goto err_out_unmap;
3106     }
3107     jme_load_macaddr(netdev);
3108 
3109     /*
3110      * Tell stack that we are not ready to work until open()
3111      */
3112     netif_carrier_off(netdev);
3113 
3114     rc = register_netdev(netdev);
3115     if (rc) {
3116         pr_err("Cannot register net device\n");
3117         goto err_out_unmap;
3118     }
3119 
3120     netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3121            (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
3122            "JMC250 Gigabit Ethernet" :
3123            (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
3124            "JMC260 Fast Ethernet" : "Unknown",
3125            (jme->fpgaver != 0) ? " (FPGA)" : "",
3126            (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
3127            jme->pcirev, netdev->dev_addr);
3128 
3129     return 0;
3130 
3131 err_out_unmap:
3132     iounmap(jme->regs);
3133 err_out_free_netdev:
3134     free_netdev(netdev);
3135 err_out_release_regions:
3136     pci_release_regions(pdev);
3137 err_out_disable_pdev:
3138     pci_disable_device(pdev);
3139 err_out:
3140     return rc;
3141 }
3142 
3143 static void
3144 jme_remove_one(struct pci_dev *pdev)
3145 {
3146     struct net_device *netdev = pci_get_drvdata(pdev);
3147     struct jme_adapter *jme = netdev_priv(netdev);
3148 
3149     unregister_netdev(netdev);
3150     iounmap(jme->regs);
3151     free_netdev(netdev);
3152     pci_release_regions(pdev);
3153     pci_disable_device(pdev);
3154 
3155 }
3156 
3157 static void
3158 jme_shutdown(struct pci_dev *pdev)
3159 {
3160     struct net_device *netdev = pci_get_drvdata(pdev);
3161     struct jme_adapter *jme = netdev_priv(netdev);
3162 
3163     jme_powersave_phy(jme);
3164     pci_pme_active(pdev, true);
3165 }
3166 
3167 #ifdef CONFIG_PM_SLEEP
3168 static int
3169 jme_suspend(struct device *dev)
3170 {
3171     struct net_device *netdev = dev_get_drvdata(dev);
3172     struct jme_adapter *jme = netdev_priv(netdev);
3173 
3174     if (!netif_running(netdev))
3175         return 0;
3176 
3177     atomic_dec(&jme->link_changing);
3178 
3179     netif_device_detach(netdev);
3180     netif_stop_queue(netdev);
3181     jme_stop_irq(jme);
3182 
3183     tasklet_disable(&jme->txclean_task);
3184     tasklet_disable(&jme->rxclean_task);
3185     tasklet_disable(&jme->rxempty_task);
3186 
3187     if (netif_carrier_ok(netdev)) {
3188         if (test_bit(JME_FLAG_POLL, &jme->flags))
3189             jme_polling_mode(jme);
3190 
3191         jme_stop_pcc_timer(jme);
3192         jme_disable_rx_engine(jme);
3193         jme_disable_tx_engine(jme);
3194         jme_reset_mac_processor(jme);
3195         jme_free_rx_resources(jme);
3196         jme_free_tx_resources(jme);
3197         netif_carrier_off(netdev);
3198         jme->phylink = 0;
3199     }
3200 
3201     tasklet_enable(&jme->txclean_task);
3202     tasklet_enable(&jme->rxclean_task);
3203     tasklet_enable(&jme->rxempty_task);
3204 
3205     jme_powersave_phy(jme);
3206 
3207     return 0;
3208 }
3209 
3210 static int
3211 jme_resume(struct device *dev)
3212 {
3213     struct net_device *netdev = dev_get_drvdata(dev);
3214     struct jme_adapter *jme = netdev_priv(netdev);
3215 
3216     if (!netif_running(netdev))
3217         return 0;
3218 
3219     jme_clear_pm_disable_wol(jme);
3220     jme_phy_on(jme);
3221     if (test_bit(JME_FLAG_SSET, &jme->flags))
3222         jme_set_link_ksettings(netdev, &jme->old_cmd);
3223     else
3224         jme_reset_phy_processor(jme);
3225     jme_phy_calibration(jme);
3226     jme_phy_setEA(jme);
3227     netif_device_attach(netdev);
3228 
3229     atomic_inc(&jme->link_changing);
3230 
3231     jme_reset_link(jme);
3232 
3233     jme_start_irq(jme);
3234 
3235     return 0;
3236 }
3237 
3238 static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
3239 #define JME_PM_OPS (&jme_pm_ops)
3240 
3241 #else
3242 
3243 #define JME_PM_OPS NULL
3244 #endif
3245 
3246 static const struct pci_device_id jme_pci_tbl[] = {
3247     { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
3248     { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
3249     { }
3250 };
3251 
3252 static struct pci_driver jme_driver = {
3253     .name           = DRV_NAME,
3254     .id_table       = jme_pci_tbl,
3255     .probe          = jme_init_one,
3256     .remove         = jme_remove_one,
3257     .shutdown       = jme_shutdown,
3258     .driver.pm  = JME_PM_OPS,
3259 };
3260 
3261 static int __init
3262 jme_init_module(void)
3263 {
3264     pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3265     return pci_register_driver(&jme_driver);
3266 }
3267 
3268 static void __exit
3269 jme_cleanup_module(void)
3270 {
3271     pci_unregister_driver(&jme_driver);
3272 }
3273 
3274 module_init(jme_init_module);
3275 module_exit(jme_cleanup_module);
3276 
3277 MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
3278 MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3279 MODULE_LICENSE("GPL");
3280 MODULE_VERSION(DRV_VERSION);
3281 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);