0001 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
0002  *
0003  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
0004  * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
0005  * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
0006  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
0007  * Copyright (C) 2006 Broadcom Corporation.
0008  * Copyright (C) 2007 Michael Buesch <m@bues.ch>
0009  * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
0010  *
0011  * Distribute under GPL.
0012  */
0013 
0014 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0015 
0016 #include <linux/kernel.h>
0017 #include <linux/module.h>
0018 #include <linux/moduleparam.h>
0019 #include <linux/types.h>
0020 #include <linux/netdevice.h>
0021 #include <linux/ethtool.h>
0022 #include <linux/mii.h>
0023 #include <linux/if_ether.h>
0024 #include <linux/if_vlan.h>
0025 #include <linux/etherdevice.h>
0026 #include <linux/pci.h>
0027 #include <linux/delay.h>
0028 #include <linux/init.h>
0029 #include <linux/interrupt.h>
0030 #include <linux/dma-mapping.h>
0031 #include <linux/ssb/ssb.h>
0032 #include <linux/slab.h>
0033 #include <linux/phy.h>
0034 
0035 #include <linux/uaccess.h>
0036 #include <asm/io.h>
0037 #include <asm/irq.h>
0038 
0039 
0040 #include "b44.h"
0041 
0042 #define DRV_MODULE_NAME     "b44"
0043 #define DRV_DESCRIPTION     "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
0044 
0045 #define B44_DEF_MSG_ENABLE    \
0046     (NETIF_MSG_DRV      | \
0047      NETIF_MSG_PROBE    | \
0048      NETIF_MSG_LINK     | \
0049      NETIF_MSG_TIMER    | \
0050      NETIF_MSG_IFDOWN   | \
0051      NETIF_MSG_IFUP     | \
0052      NETIF_MSG_RX_ERR   | \
0053      NETIF_MSG_TX_ERR)
0054 
0055 /* length of time before we decide the hardware is borked,
0056  * and dev->tx_timeout() should be called to fix the problem
0057  */
0058 #define B44_TX_TIMEOUT          (5 * HZ)
0059 
0060 /* hardware minimum and maximum for a single frame's data payload */
0061 #define B44_MIN_MTU         ETH_ZLEN
0062 #define B44_MAX_MTU         ETH_DATA_LEN
0063 
0064 #define B44_RX_RING_SIZE        512
0065 #define B44_DEF_RX_RING_PENDING     200
0066 #define B44_RX_RING_BYTES   (sizeof(struct dma_desc) * \
0067                  B44_RX_RING_SIZE)
0068 #define B44_TX_RING_SIZE        512
0069 #define B44_DEF_TX_RING_PENDING     (B44_TX_RING_SIZE - 1)
0070 #define B44_TX_RING_BYTES   (sizeof(struct dma_desc) * \
0071                  B44_TX_RING_SIZE)
0072 
0073 #define TX_RING_GAP(BP) \
0074     (B44_TX_RING_SIZE - (BP)->tx_pending)
0075 #define TX_BUFFS_AVAIL(BP)                      \
0076     (((BP)->tx_cons <= (BP)->tx_prod) ?             \
0077       (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :        \
0078       (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
0079 #define NEXT_TX(N)      (((N) + 1) & (B44_TX_RING_SIZE - 1))
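/* Illustrative note (editor's example, not part of the driver): with
 * B44_TX_RING_SIZE = 512 and the default tx_pending of 511, TX_RING_GAP()
 * is 1.  If tx_cons = 10 and tx_prod = 508, TX_BUFFS_AVAIL() yields
 * 10 + 511 - 508 = 13 free slots; once tx_prod wraps around to 2, the
 * second arm yields 10 - 2 - 1 = 7.  NEXT_TX() relies on the ring size
 * being a power of two, so increment-and-mask is a cheap modulo.
 */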
0080 
0081 #define RX_PKT_OFFSET       (RX_HEADER_LEN + 2)
0082 #define RX_PKT_BUF_SZ       (1536 + RX_PKT_OFFSET)
0083 
0084 /* minimum number of free TX descriptors required to wake up TX process */
0085 #define B44_TX_WAKEUP_THRESH        (B44_TX_RING_SIZE / 4)
0086 
0087 /* b44 internal pattern match filter info */
0088 #define B44_PATTERN_BASE    0x400
0089 #define B44_PATTERN_SIZE    0x80
0090 #define B44_PMASK_BASE      0x600
0091 #define B44_PMASK_SIZE      0x10
0092 #define B44_MAX_PATTERNS    16
0093 #define B44_ETHIPV6UDP_HLEN 62
0094 #define B44_ETHIPV4UDP_HLEN 42
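/* For reference (editor's note): 42 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP)
 * and 62 = 14 (Ethernet) + 40 (IPv6) + 8 (UDP), i.e. the offsets at which
 * a magic packet's payload begins for the two encapsulations handled by
 * b44_setup_pseudo_magicp() below.
 */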
0095 
0096 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
0097 MODULE_DESCRIPTION(DRV_DESCRIPTION);
0098 MODULE_LICENSE("GPL");
0099 
0100 static int b44_debug = -1;  /* -1 == use B44_DEF_MSG_ENABLE as value */
0101 module_param(b44_debug, int, 0);
0102 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
0103 
0104 
0105 #ifdef CONFIG_B44_PCI
0106 static const struct pci_device_id b44_pci_tbl[] = {
0107     { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
0108     { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
0109     { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
0110     { 0 } /* terminate list with empty entry */
0111 };
0112 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
0113 
0114 static struct pci_driver b44_pci_driver = {
0115     .name       = DRV_MODULE_NAME,
0116     .id_table   = b44_pci_tbl,
0117 };
0118 #endif /* CONFIG_B44_PCI */
0119 
0120 static const struct ssb_device_id b44_ssb_tbl[] = {
0121     SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
0122     {},
0123 };
0124 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
0125 
0126 static void b44_halt(struct b44 *);
0127 static void b44_init_rings(struct b44 *);
0128 
0129 #define B44_FULL_RESET      1
0130 #define B44_FULL_RESET_SKIP_PHY 2
0131 #define B44_PARTIAL_RESET   3
0132 #define B44_CHIP_RESET_FULL 4
0133 #define B44_CHIP_RESET_PARTIAL  5
0134 
0135 static void b44_init_hw(struct b44 *, int);
0136 
0137 static int dma_desc_sync_size;
0138 static int instance;
0139 
0140 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
0141 #define _B44(x...)  # x,
0142 B44_STAT_REG_DECLARE
0143 #undef _B44
0144 };
0145 
0146 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
0147                         dma_addr_t dma_base,
0148                         unsigned long offset,
0149                         enum dma_data_direction dir)
0150 {
0151     dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
0152                    dma_desc_sync_size, dir);
0153 }
0154 
0155 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
0156                          dma_addr_t dma_base,
0157                          unsigned long offset,
0158                          enum dma_data_direction dir)
0159 {
0160     dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
0161                 dma_desc_sync_size, dir);
0162 }
0163 
0164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
0165 {
0166     return ssb_read32(bp->sdev, reg);
0167 }
0168 
0169 static inline void bw32(const struct b44 *bp,
0170             unsigned long reg, unsigned long val)
0171 {
0172     ssb_write32(bp->sdev, reg, val);
0173 }
0174 
0175 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
0176             u32 bit, unsigned long timeout, const int clear)
0177 {
0178     unsigned long i;
0179 
0180     for (i = 0; i < timeout; i++) {
0181         u32 val = br32(bp, reg);
0182 
0183         if (clear && !(val & bit))
0184             break;
0185         if (!clear && (val & bit))
0186             break;
0187         udelay(10);
0188     }
0189     if (i == timeout) {
0190         if (net_ratelimit())
0191             netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
0192                    bit, reg, clear ? "clear" : "set");
0193 
0194         return -ENODEV;
0195     }
0196     return 0;
0197 }
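/* Usage sketch (editor's note): each loop iteration delays 10us, so a
 * caller such as
 *
 *     b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 *
 * polls for up to roughly 1ms for the BUSY bit to clear before the
 * rate-limited error message fires and -ENODEV is returned.
 */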
0198 
0199 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
0200 {
0201     u32 val;
0202 
0203     bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
0204                 (index << CAM_CTRL_INDEX_SHIFT)));
0205 
0206     b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
0207 
0208     val = br32(bp, B44_CAM_DATA_LO);
0209 
0210     data[2] = (val >> 24) & 0xFF;
0211     data[3] = (val >> 16) & 0xFF;
0212     data[4] = (val >> 8) & 0xFF;
0213     data[5] = (val >> 0) & 0xFF;
0214 
0215     val = br32(bp, B44_CAM_DATA_HI);
0216 
0217     data[0] = (val >> 8) & 0xFF;
0218     data[1] = (val >> 0) & 0xFF;
0219 }
0220 
0221 static inline void __b44_cam_write(struct b44 *bp,
0222                    const unsigned char *data, int index)
0223 {
0224     u32 val;
0225 
0226     val  = ((u32) data[2]) << 24;
0227     val |= ((u32) data[3]) << 16;
0228     val |= ((u32) data[4]) <<  8;
0229     val |= ((u32) data[5]) <<  0;
0230     bw32(bp, B44_CAM_DATA_LO, val);
0231     val = (CAM_DATA_HI_VALID |
0232            (((u32) data[0]) << 8) |
0233            (((u32) data[1]) << 0));
0234     bw32(bp, B44_CAM_DATA_HI, val);
0235     bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
0236                 (index << CAM_CTRL_INDEX_SHIFT)));
0237     b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
0238 }
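/* Worked example (editor's note, hypothetical address): writing MAC
 * 00:11:22:33:44:55 packs CAM_DATA_LO = 0x22334455 and
 * CAM_DATA_HI = CAM_DATA_HI_VALID | 0x0011 before the WRITE command
 * latches the entry at the given CAM index.
 */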
0239 
0240 static inline void __b44_disable_ints(struct b44 *bp)
0241 {
0242     bw32(bp, B44_IMASK, 0);
0243 }
0244 
0245 static void b44_disable_ints(struct b44 *bp)
0246 {
0247     __b44_disable_ints(bp);
0248 
0249     /* Flush posted writes. */
0250     br32(bp, B44_IMASK);
0251 }
0252 
0253 static void b44_enable_ints(struct b44 *bp)
0254 {
0255     bw32(bp, B44_IMASK, bp->imask);
0256 }
0257 
0258 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
0259 {
0260     int err;
0261 
0262     bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
0263     bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
0264                  (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
0265                  (phy_addr << MDIO_DATA_PMD_SHIFT) |
0266                  (reg << MDIO_DATA_RA_SHIFT) |
0267                  (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
0268     err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
0269     *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
0270 
0271     return err;
0272 }
0273 
0274 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
0275 {
0276     bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
0277     bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
0278                  (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
0279                  (phy_addr << MDIO_DATA_PMD_SHIFT) |
0280                  (reg << MDIO_DATA_RA_SHIFT) |
0281                  (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
0282                  (val & MDIO_DATA_DATA)));
0283     return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
0284 }
0285 
0286 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
0287 {
0288     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
0289         return 0;
0290 
0291     return __b44_readphy(bp, bp->phy_addr, reg, val);
0292 }
0293 
0294 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
0295 {
0296     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
0297         return 0;
0298 
0299     return __b44_writephy(bp, bp->phy_addr, reg, val);
0300 }
0301 
0302 /* miilib interface */
0303 static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
0304 {
0305     u32 val;
0306     struct b44 *bp = netdev_priv(dev);
0307     int rc = __b44_readphy(bp, phy_id, location, &val);
0308     if (rc)
0309         return 0xffffffff;
0310     return val;
0311 }
0312 
0313 static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
0314                    int val)
0315 {
0316     struct b44 *bp = netdev_priv(dev);
0317     __b44_writephy(bp, phy_id, location, val);
0318 }
0319 
0320 static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
0321 {
0322     u32 val;
0323     struct b44 *bp = bus->priv;
0324     int rc = __b44_readphy(bp, phy_id, location, &val);
0325     if (rc)
0326         return 0xffffffff;
0327     return val;
0328 }
0329 
0330 static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
0331                  u16 val)
0332 {
0333     struct b44 *bp = bus->priv;
0334     return __b44_writephy(bp, phy_id, location, val);
0335 }
0336 
0337 static int b44_phy_reset(struct b44 *bp)
0338 {
0339     u32 val;
0340     int err;
0341 
0342     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
0343         return 0;
0344     err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
0345     if (err)
0346         return err;
0347     udelay(100);
0348     err = b44_readphy(bp, MII_BMCR, &val);
0349     if (!err) {
0350         if (val & BMCR_RESET) {
0351             netdev_err(bp->dev, "PHY Reset would not complete\n");
0352             err = -ENODEV;
0353         }
0354     }
0355 
0356     return err;
0357 }
0358 
0359 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
0360 {
0361     u32 val;
0362 
0363     bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
0364     bp->flags |= pause_flags;
0365 
0366     val = br32(bp, B44_RXCONFIG);
0367     if (pause_flags & B44_FLAG_RX_PAUSE)
0368         val |= RXCONFIG_FLOW;
0369     else
0370         val &= ~RXCONFIG_FLOW;
0371     bw32(bp, B44_RXCONFIG, val);
0372 
0373     val = br32(bp, B44_MAC_FLOW);
0374     if (pause_flags & B44_FLAG_TX_PAUSE)
0375         val |= (MAC_FLOW_PAUSE_ENAB |
0376             (0xc0 & MAC_FLOW_RX_HI_WATER));
0377     else
0378         val &= ~MAC_FLOW_PAUSE_ENAB;
0379     bw32(bp, B44_MAC_FLOW, val);
0380 }
0381 
0382 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
0383 {
0384     u32 pause_enab = 0;
0385 
0386     /* The driver supports only RX pause by default because
0387        the b44 MAC TX pause mechanism generates excessive
0388        pause frames.
0389        Use ethtool to turn on b44 TX pause if necessary.
0390      */
0391     if ((local & ADVERTISE_PAUSE_CAP) &&
0392         (local & ADVERTISE_PAUSE_ASYM)){
0393         if ((remote & LPA_PAUSE_ASYM) &&
0394             !(remote & LPA_PAUSE_CAP))
0395             pause_enab |= B44_FLAG_RX_PAUSE;
0396     }
0397 
0398     __b44_set_flow_ctrl(bp, pause_enab);
0399 }
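/* Example (editor's note): with local = ADVERTISE_PAUSE_CAP |
 * ADVERTISE_PAUSE_ASYM and remote = LPA_PAUSE_ASYM (but not
 * LPA_PAUSE_CAP), the resolution above enables RX pause only; any other
 * advertisement combination leaves both pause directions disabled here.
 */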
0400 
0401 #ifdef CONFIG_BCM47XX
0402 #include <linux/bcm47xx_nvram.h>
0403 static void b44_wap54g10_workaround(struct b44 *bp)
0404 {
0405     char buf[20];
0406     u32 val;
0407     int err;
0408 
0409     /*
0410      * workaround for bad hardware design in Linksys WAP54G v1.0
0411      * see https://dev.openwrt.org/ticket/146
0412      * check and reset bit "isolate"
0413      */
0414     if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
0415         return;
0416     if (simple_strtoul(buf, NULL, 0) == 2) {
0417         err = __b44_readphy(bp, 0, MII_BMCR, &val);
0418         if (err)
0419             goto error;
0420         if (!(val & BMCR_ISOLATE))
0421             return;
0422         val &= ~BMCR_ISOLATE;
0423         err = __b44_writephy(bp, 0, MII_BMCR, val);
0424         if (err)
0425             goto error;
0426     }
0427     return;
0428 error:
0429     pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
0430 }
0431 #else
0432 static inline void b44_wap54g10_workaround(struct b44 *bp)
0433 {
0434 }
0435 #endif
0436 
0437 static int b44_setup_phy(struct b44 *bp)
0438 {
0439     u32 val;
0440     int err;
0441 
0442     b44_wap54g10_workaround(bp);
0443 
0444     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
0445         return 0;
0446     if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
0447         goto out;
0448     if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
0449                 val & MII_ALEDCTRL_ALLMSK)) != 0)
0450         goto out;
0451     if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
0452         goto out;
0453     if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
0454                 val | MII_TLEDCTRL_ENABLE)) != 0)
0455         goto out;
0456 
0457     if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
0458         u32 adv = ADVERTISE_CSMA;
0459 
0460         if (bp->flags & B44_FLAG_ADV_10HALF)
0461             adv |= ADVERTISE_10HALF;
0462         if (bp->flags & B44_FLAG_ADV_10FULL)
0463             adv |= ADVERTISE_10FULL;
0464         if (bp->flags & B44_FLAG_ADV_100HALF)
0465             adv |= ADVERTISE_100HALF;
0466         if (bp->flags & B44_FLAG_ADV_100FULL)
0467             adv |= ADVERTISE_100FULL;
0468 
0469         if (bp->flags & B44_FLAG_PAUSE_AUTO)
0470             adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
0471 
0472         if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
0473             goto out;
0474         if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
0475                                BMCR_ANRESTART))) != 0)
0476             goto out;
0477     } else {
0478         u32 bmcr;
0479 
0480         if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
0481             goto out;
0482         bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
0483         if (bp->flags & B44_FLAG_100_BASE_T)
0484             bmcr |= BMCR_SPEED100;
0485         if (bp->flags & B44_FLAG_FULL_DUPLEX)
0486             bmcr |= BMCR_FULLDPLX;
0487         if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
0488             goto out;
0489 
0490         /* Since we will not be negotiating there is no safe way
0491          * to determine if the link partner supports flow control
0492          * or not.  So just disable it completely in this case.
0493          */
0494         b44_set_flow_ctrl(bp, 0, 0);
0495     }
0496 
0497 out:
0498     return err;
0499 }
0500 
0501 static void b44_stats_update(struct b44 *bp)
0502 {
0503     unsigned long reg;
0504     u64 *val;
0505 
0506     val = &bp->hw_stats.tx_good_octets;
0507     u64_stats_update_begin(&bp->hw_stats.syncp);
0508 
0509     for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
0510         *val++ += br32(bp, reg);
0511     }
0512 
0513     for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
0514         *val++ += br32(bp, reg);
0515     }
0516 
0517     u64_stats_update_end(&bp->hw_stats.syncp);
0518 }
0519 
0520 static void b44_link_report(struct b44 *bp)
0521 {
0522     if (!netif_carrier_ok(bp->dev)) {
0523         netdev_info(bp->dev, "Link is down\n");
0524     } else {
0525         netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
0526                 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
0527                 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
0528 
0529         netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
0530                 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
0531                 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
0532     }
0533 }
0534 
0535 static void b44_check_phy(struct b44 *bp)
0536 {
0537     u32 bmsr, aux;
0538 
0539     if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
0540         bp->flags |= B44_FLAG_100_BASE_T;
0541         if (!netif_carrier_ok(bp->dev)) {
0542             u32 val = br32(bp, B44_TX_CTRL);
0543             if (bp->flags & B44_FLAG_FULL_DUPLEX)
0544                 val |= TX_CTRL_DUPLEX;
0545             else
0546                 val &= ~TX_CTRL_DUPLEX;
0547             bw32(bp, B44_TX_CTRL, val);
0548             netif_carrier_on(bp->dev);
0549             b44_link_report(bp);
0550         }
0551         return;
0552     }
0553 
0554     if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
0555         !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
0556         (bmsr != 0xffff)) {
0557         if (aux & MII_AUXCTRL_SPEED)
0558             bp->flags |= B44_FLAG_100_BASE_T;
0559         else
0560             bp->flags &= ~B44_FLAG_100_BASE_T;
0561         if (aux & MII_AUXCTRL_DUPLEX)
0562             bp->flags |= B44_FLAG_FULL_DUPLEX;
0563         else
0564             bp->flags &= ~B44_FLAG_FULL_DUPLEX;
0565 
0566         if (!netif_carrier_ok(bp->dev) &&
0567             (bmsr & BMSR_LSTATUS)) {
0568             u32 val = br32(bp, B44_TX_CTRL);
0569             u32 local_adv, remote_adv;
0570 
0571             if (bp->flags & B44_FLAG_FULL_DUPLEX)
0572                 val |= TX_CTRL_DUPLEX;
0573             else
0574                 val &= ~TX_CTRL_DUPLEX;
0575             bw32(bp, B44_TX_CTRL, val);
0576 
0577             if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
0578                 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
0579                 !b44_readphy(bp, MII_LPA, &remote_adv))
0580                 b44_set_flow_ctrl(bp, local_adv, remote_adv);
0581 
0582             /* Link now up */
0583             netif_carrier_on(bp->dev);
0584             b44_link_report(bp);
0585         } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
0586             /* Link now down */
0587             netif_carrier_off(bp->dev);
0588             b44_link_report(bp);
0589         }
0590 
0591         if (bmsr & BMSR_RFAULT)
0592             netdev_warn(bp->dev, "Remote fault detected in PHY\n");
0593         if (bmsr & BMSR_JCD)
0594             netdev_warn(bp->dev, "Jabber detected in PHY\n");
0595     }
0596 }
0597 
0598 static void b44_timer(struct timer_list *t)
0599 {
0600     struct b44 *bp = from_timer(bp, t, timer);
0601 
0602     spin_lock_irq(&bp->lock);
0603 
0604     b44_check_phy(bp);
0605 
0606     b44_stats_update(bp);
0607 
0608     spin_unlock_irq(&bp->lock);
0609 
0610     mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
0611 }
0612 
0613 static void b44_tx(struct b44 *bp)
0614 {
0615     u32 cur, cons;
0616     unsigned bytes_compl = 0, pkts_compl = 0;
0617 
0618     cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
0619     cur /= sizeof(struct dma_desc);
0620 
0621     /* XXX needs updating when NETIF_F_SG is supported */
0622     for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
0623         struct ring_info *rp = &bp->tx_buffers[cons];
0624         struct sk_buff *skb = rp->skb;
0625 
0626         BUG_ON(skb == NULL);
0627 
0628         dma_unmap_single(bp->sdev->dma_dev,
0629                  rp->mapping,
0630                  skb->len,
0631                  DMA_TO_DEVICE);
0632         rp->skb = NULL;
0633 
0634         bytes_compl += skb->len;
0635         pkts_compl++;
0636 
0637         dev_consume_skb_irq(skb);
0638     }
0639 
0640     netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
0641     bp->tx_cons = cons;
0642     if (netif_queue_stopped(bp->dev) &&
0643         TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
0644         netif_wake_queue(bp->dev);
0645 
0646     bw32(bp, B44_GPTIMER, 0);
0647 }
0648 
0649 /* Works like this.  This chip writes a "struct rx_header" 30 bytes
0650  * before the DMA address you give it.  So we allocate 30 more bytes
0651  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
0652  * point the chip at 30 bytes past where the rx_header will go.
0653  */
0654 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
0655 {
0656     struct dma_desc *dp;
0657     struct ring_info *src_map, *map;
0658     struct rx_header *rh;
0659     struct sk_buff *skb;
0660     dma_addr_t mapping;
0661     int dest_idx;
0662     u32 ctrl;
0663 
0664     src_map = NULL;
0665     if (src_idx >= 0)
0666         src_map = &bp->rx_buffers[src_idx];
0667     dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
0668     map = &bp->rx_buffers[dest_idx];
0669     skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
0670     if (skb == NULL)
0671         return -ENOMEM;
0672 
0673     mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
0674                  RX_PKT_BUF_SZ,
0675                  DMA_FROM_DEVICE);
0676 
0677     /* Hardware bug work-around: the chip is unable to do PCI DMA
0678        to/from anything above 1GB :-( */
0679     if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
0680         mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
0681         /* Sigh... */
0682         if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
0683             dma_unmap_single(bp->sdev->dma_dev, mapping,
0684                          RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
0685         dev_kfree_skb_any(skb);
0686         skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
0687         if (skb == NULL)
0688             return -ENOMEM;
0689         mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
0690                      RX_PKT_BUF_SZ,
0691                      DMA_FROM_DEVICE);
0692         if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
0693             mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
0694             if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
0695                 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
0696             dev_kfree_skb_any(skb);
0697             return -ENOMEM;
0698         }
0699         bp->force_copybreak = 1;
0700     }
0701 
0702     rh = (struct rx_header *) skb->data;
0703 
0704     rh->len = 0;
0705     rh->flags = 0;
0706 
0707     map->skb = skb;
0708     map->mapping = mapping;
0709 
0710     if (src_map != NULL)
0711         src_map->skb = NULL;
0712 
0713     ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
0714     if (dest_idx == (B44_RX_RING_SIZE - 1))
0715         ctrl |= DESC_CTRL_EOT;
0716 
0717     dp = &bp->rx_ring[dest_idx];
0718     dp->ctrl = cpu_to_le32(ctrl);
0719     dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
0720 
0721     if (bp->flags & B44_FLAG_RX_RING_HACK)
0722         b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
0723                                 dest_idx * sizeof(*dp),
0724                                 DMA_BIDIRECTIONAL);
0725 
0726     return RX_PKT_BUF_SZ;
0727 }
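/* Resulting buffer layout (editor's sketch):
 *
 *   skb->data                        skb->data + RX_PKT_OFFSET
 *   |<-- struct rx_header + pad ---->|<-- packet data ... -->|
 *   0                                30                      RX_PKT_BUF_SZ
 *
 * The descriptor addr points at the start of the mapped buffer (plus
 * bp->dma_offset), so the chip deposits the rx_header first and the
 * frame RX_PKT_OFFSET bytes after it.
 */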
0728 
0729 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
0730 {
0731     struct dma_desc *src_desc, *dest_desc;
0732     struct ring_info *src_map, *dest_map;
0733     struct rx_header *rh;
0734     int dest_idx;
0735     __le32 ctrl;
0736 
0737     dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
0738     dest_desc = &bp->rx_ring[dest_idx];
0739     dest_map = &bp->rx_buffers[dest_idx];
0740     src_desc = &bp->rx_ring[src_idx];
0741     src_map = &bp->rx_buffers[src_idx];
0742 
0743     dest_map->skb = src_map->skb;
0744     rh = (struct rx_header *) src_map->skb->data;
0745     rh->len = 0;
0746     rh->flags = 0;
0747     dest_map->mapping = src_map->mapping;
0748 
0749     if (bp->flags & B44_FLAG_RX_RING_HACK)
0750         b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
0751                              src_idx * sizeof(*src_desc),
0752                              DMA_BIDIRECTIONAL);
0753 
0754     ctrl = src_desc->ctrl;
0755     if (dest_idx == (B44_RX_RING_SIZE - 1))
0756         ctrl |= cpu_to_le32(DESC_CTRL_EOT);
0757     else
0758         ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
0759 
0760     dest_desc->ctrl = ctrl;
0761     dest_desc->addr = src_desc->addr;
0762 
0763     src_map->skb = NULL;
0764 
0765     if (bp->flags & B44_FLAG_RX_RING_HACK)
0766         b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
0767                          dest_idx * sizeof(*dest_desc),
0768                          DMA_BIDIRECTIONAL);
0769 
0770     dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
0771                    RX_PKT_BUF_SZ,
0772                    DMA_FROM_DEVICE);
0773 }
0774 
0775 static int b44_rx(struct b44 *bp, int budget)
0776 {
0777     int received;
0778     u32 cons, prod;
0779 
0780     received = 0;
0781     prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
0782     prod /= sizeof(struct dma_desc);
0783     cons = bp->rx_cons;
0784 
0785     while (cons != prod && budget > 0) {
0786         struct ring_info *rp = &bp->rx_buffers[cons];
0787         struct sk_buff *skb = rp->skb;
0788         dma_addr_t map = rp->mapping;
0789         struct rx_header *rh;
0790         u16 len;
0791 
0792         dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
0793                     RX_PKT_BUF_SZ,
0794                     DMA_FROM_DEVICE);
0795         rh = (struct rx_header *) skb->data;
0796         len = le16_to_cpu(rh->len);
0797         if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
0798             (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
0799         drop_it:
0800             b44_recycle_rx(bp, cons, bp->rx_prod);
0801         drop_it_no_recycle:
0802             bp->dev->stats.rx_dropped++;
0803             goto next_pkt;
0804         }
0805 
0806         if (len == 0) {
0807             int i = 0;
0808 
0809             do {
0810                 udelay(2);
0811                 barrier();
0812                 len = le16_to_cpu(rh->len);
0813             } while (len == 0 && i++ < 5);
0814             if (len == 0)
0815                 goto drop_it;
0816         }
0817 
0818         /* Omit CRC. */
0819         len -= 4;
0820 
0821         if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
0822             int skb_size;
0823             skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
0824             if (skb_size < 0)
0825                 goto drop_it;
0826             dma_unmap_single(bp->sdev->dma_dev, map,
0827                      skb_size, DMA_FROM_DEVICE);
0828             /* Leave out rx_header */
0829             skb_put(skb, len + RX_PKT_OFFSET);
0830             skb_pull(skb, RX_PKT_OFFSET);
0831         } else {
0832             struct sk_buff *copy_skb;
0833 
0834             b44_recycle_rx(bp, cons, bp->rx_prod);
0835             copy_skb = napi_alloc_skb(&bp->napi, len);
0836             if (copy_skb == NULL)
0837                 goto drop_it_no_recycle;
0838 
0839             skb_put(copy_skb, len);
0840             /* DMA sync done above, copy just the actual packet */
0841             skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
0842                              copy_skb->data, len);
0843             skb = copy_skb;
0844         }
0845         skb_checksum_none_assert(skb);
0846         skb->protocol = eth_type_trans(skb, bp->dev);
0847         netif_receive_skb(skb);
0848         received++;
0849         budget--;
0850     next_pkt:
0851         bp->rx_prod = (bp->rx_prod + 1) &
0852             (B44_RX_RING_SIZE - 1);
0853         cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
0854     }
0855 
0856     bp->rx_cons = cons;
0857     bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
0858 
0859     return received;
0860 }
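/* Editor's note: the copybreak path above trades a memcpy for an
 * allocation.  Frames no longer than RX_COPY_THRESHOLD (or all frames,
 * once force_copybreak is set by the DMA work-around) are copied into a
 * freshly allocated skb sized to fit, and the original large buffer is
 * recycled back into the ring without being remapped.
 */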
0861 
0862 static int b44_poll(struct napi_struct *napi, int budget)
0863 {
0864     struct b44 *bp = container_of(napi, struct b44, napi);
0865     int work_done;
0866     unsigned long flags;
0867 
0868     spin_lock_irqsave(&bp->lock, flags);
0869 
0870     if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
0871         /* spin_lock(&bp->tx_lock); */
0872         b44_tx(bp);
0873         /* spin_unlock(&bp->tx_lock); */
0874     }
0875     if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
0876         bp->istat &= ~ISTAT_RFO;
0877         b44_disable_ints(bp);
0878         ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
0879         b44_init_rings(bp);
0880         b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
0881         netif_wake_queue(bp->dev);
0882     }
0883 
0884     spin_unlock_irqrestore(&bp->lock, flags);
0885 
0886     work_done = 0;
0887     if (bp->istat & ISTAT_RX)
0888         work_done += b44_rx(bp, budget);
0889 
0890     if (bp->istat & ISTAT_ERRORS) {
0891         spin_lock_irqsave(&bp->lock, flags);
0892         b44_halt(bp);
0893         b44_init_rings(bp);
0894         b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
0895         netif_wake_queue(bp->dev);
0896         spin_unlock_irqrestore(&bp->lock, flags);
0897         work_done = 0;
0898     }
0899 
0900     if (work_done < budget) {
0901         napi_complete_done(napi, work_done);
0902         b44_enable_ints(bp);
0903     }
0904 
0905     return work_done;
0906 }
0907 
0908 static irqreturn_t b44_interrupt(int irq, void *dev_id)
0909 {
0910     struct net_device *dev = dev_id;
0911     struct b44 *bp = netdev_priv(dev);
0912     u32 istat, imask;
0913     int handled = 0;
0914 
0915     spin_lock(&bp->lock);
0916 
0917     istat = br32(bp, B44_ISTAT);
0918     imask = br32(bp, B44_IMASK);
0919 
0920     /* The interrupt mask register controls which interrupt bits
0921      * will actually raise an interrupt to the CPU when set by hw/firmware,
0922      * but doesn't mask off the bits.
0923      */
0924     istat &= imask;
0925     if (istat) {
0926         handled = 1;
0927 
0928         if (unlikely(!netif_running(dev))) {
0929             netdev_info(dev, "late interrupt\n");
0930             goto irq_ack;
0931         }
0932 
0933         if (napi_schedule_prep(&bp->napi)) {
0934             /* NOTE: These writes are posted; the readback of
0935              *       the ISTAT register below flushes them.
0936              */
0937             bp->istat = istat;
0938             __b44_disable_ints(bp);
0939             __napi_schedule(&bp->napi);
0940         }
0941 
0942 irq_ack:
0943         bw32(bp, B44_ISTAT, istat);
0944         br32(bp, B44_ISTAT);
0945     }
0946     spin_unlock(&bp->lock);
0947     return IRQ_RETVAL(handled);
0948 }
0949 
0950 static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
0951 {
0952     struct b44 *bp = netdev_priv(dev);
0953 
0954     netdev_err(dev, "transmit timed out, resetting\n");
0955 
0956     spin_lock_irq(&bp->lock);
0957 
0958     b44_halt(bp);
0959     b44_init_rings(bp);
0960     b44_init_hw(bp, B44_FULL_RESET);
0961 
0962     spin_unlock_irq(&bp->lock);
0963 
0964     b44_enable_ints(bp);
0965 
0966     netif_wake_queue(dev);
0967 }
0968 
0969 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
0970 {
0971     struct b44 *bp = netdev_priv(dev);
0972     int rc = NETDEV_TX_OK;
0973     dma_addr_t mapping;
0974     u32 len, entry, ctrl;
0975     unsigned long flags;
0976 
0977     len = skb->len;
0978     spin_lock_irqsave(&bp->lock, flags);
0979 
0980     /* This is a hard error, log it. */
0981     if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
0982         netif_stop_queue(dev);
0983         netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
0984         goto err_out;
0985     }
0986 
0987     mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
0988     if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
0989         struct sk_buff *bounce_skb;
0990 
0991         /* Chip can't handle DMA to/from >1GB, use bounce buffer */
0992         if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
0993             dma_unmap_single(bp->sdev->dma_dev, mapping, len,
0994                          DMA_TO_DEVICE);
0995 
0996         bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
0997         if (!bounce_skb)
0998             goto err_out;
0999 
1000         mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
1001                      len, DMA_TO_DEVICE);
1002         if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
1003             if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
1004                 dma_unmap_single(bp->sdev->dma_dev, mapping,
1005                              len, DMA_TO_DEVICE);
1006             dev_kfree_skb_any(bounce_skb);
1007             goto err_out;
1008         }
1009 
1010         skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1011         dev_consume_skb_any(skb);
1012         skb = bounce_skb;
1013     }
1014 
1015     entry = bp->tx_prod;
1016     bp->tx_buffers[entry].skb = skb;
1017     bp->tx_buffers[entry].mapping = mapping;
1018 
1019     ctrl  = (len & DESC_CTRL_LEN);
1020     ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1021     if (entry == (B44_TX_RING_SIZE - 1))
1022         ctrl |= DESC_CTRL_EOT;
1023 
1024     bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1025     bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1026 
1027     if (bp->flags & B44_FLAG_TX_RING_HACK)
1028         b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1029                                 entry * sizeof(bp->tx_ring[0]),
1030                                 DMA_TO_DEVICE);
1031 
1032     entry = NEXT_TX(entry);
1033 
1034     bp->tx_prod = entry;
1035 
1036     wmb();
1037 
1038     bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1039     if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1040         bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1041     if (bp->flags & B44_FLAG_REORDER_BUG)
1042         br32(bp, B44_DMATX_PTR);
1043 
1044     netdev_sent_queue(dev, skb->len);
1045 
1046     if (TX_BUFFS_AVAIL(bp) < 1)
1047         netif_stop_queue(dev);
1048 
1049 out_unlock:
1050     spin_unlock_irqrestore(&bp->lock, flags);
1051 
1052     return rc;
1053 
1054 err_out:
1055     rc = NETDEV_TX_BUSY;
1056     goto out_unlock;
1057 }
1058 
1059 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1060 {
1061     struct b44 *bp = netdev_priv(dev);
1062 
1063     if (!netif_running(dev)) {
1064         /* We'll just catch it later when the
1065          * device is brought up.
1066          */
1067         dev->mtu = new_mtu;
1068         return 0;
1069     }
1070 
1071     spin_lock_irq(&bp->lock);
1072     b44_halt(bp);
1073     dev->mtu = new_mtu;
1074     b44_init_rings(bp);
1075     b44_init_hw(bp, B44_FULL_RESET);
1076     spin_unlock_irq(&bp->lock);
1077 
1078     b44_enable_ints(bp);
1079 
1080     return 0;
1081 }
1082 
1083 /* Free up pending packets in all rx/tx rings.
1084  *
1085  * The chip has been shut down and the driver detached from
1086  * the networking, so no interrupts or new tx packets will
1087  * end up in the driver.  bp->lock is not held and we are not
1088  * in an interrupt context and thus may sleep.
1089  */
1090 static void b44_free_rings(struct b44 *bp)
1091 {
1092     struct ring_info *rp;
1093     int i;
1094 
1095     for (i = 0; i < B44_RX_RING_SIZE; i++) {
1096         rp = &bp->rx_buffers[i];
1097 
1098         if (rp->skb == NULL)
1099             continue;
1100         dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1101                  DMA_FROM_DEVICE);
1102         dev_kfree_skb_any(rp->skb);
1103         rp->skb = NULL;
1104     }
1105 
1106     /* XXX needs changes once NETIF_F_SG is set... */
1107     for (i = 0; i < B44_TX_RING_SIZE; i++) {
1108         rp = &bp->tx_buffers[i];
1109 
1110         if (rp->skb == NULL)
1111             continue;
1112         dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1113                  DMA_TO_DEVICE);
1114         dev_kfree_skb_any(rp->skb);
1115         rp->skb = NULL;
1116     }
1117 }
1118 
1119 /* Initialize tx/rx rings for packet processing.
1120  *
1121  * The chip has been shut down and the driver detached from
1122  * the networking, so no interrupts or new tx packets will
1123  * end up in the driver.
1124  */
1125 static void b44_init_rings(struct b44 *bp)
1126 {
1127     int i;
1128 
1129     b44_free_rings(bp);
1130 
1131     memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1132     memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1133 
1134     if (bp->flags & B44_FLAG_RX_RING_HACK)
1135         dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1136                        DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1137 
1138     if (bp->flags & B44_FLAG_TX_RING_HACK)
1139         dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1140                        DMA_TABLE_BYTES, DMA_TO_DEVICE);
1141 
1142     for (i = 0; i < bp->rx_pending; i++) {
1143         if (b44_alloc_rx_skb(bp, -1, i) < 0)
1144             break;
1145     }
1146 }
1147 
1148 /*
1149  * Must not be invoked with interrupt sources disabled and
1150  * the hardware shut down.
1151  */
1152 static void b44_free_consistent(struct b44 *bp)
1153 {
1154     kfree(bp->rx_buffers);
1155     bp->rx_buffers = NULL;
1156     kfree(bp->tx_buffers);
1157     bp->tx_buffers = NULL;
1158     if (bp->rx_ring) {
1159         if (bp->flags & B44_FLAG_RX_RING_HACK) {
1160             dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1161                      DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1162             kfree(bp->rx_ring);
1163         } else
1164             dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1165                       bp->rx_ring, bp->rx_ring_dma);
1166         bp->rx_ring = NULL;
1167         bp->flags &= ~B44_FLAG_RX_RING_HACK;
1168     }
1169     if (bp->tx_ring) {
1170         if (bp->flags & B44_FLAG_TX_RING_HACK) {
1171             dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1172                      DMA_TABLE_BYTES, DMA_TO_DEVICE);
1173             kfree(bp->tx_ring);
1174         } else
1175             dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1176                       bp->tx_ring, bp->tx_ring_dma);
1177         bp->tx_ring = NULL;
1178         bp->flags &= ~B44_FLAG_TX_RING_HACK;
1179     }
1180 }
1181 
1182 /*
1183  * Must not be invoked with interrupt sources disabled and
1184  * the hardware shut down.  Can sleep.
1185  */
1186 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1187 {
1188     int size;
1189 
1190     size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1191     bp->rx_buffers = kzalloc(size, gfp);
1192     if (!bp->rx_buffers)
1193         goto out_err;
1194 
1195     size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1196     bp->tx_buffers = kzalloc(size, gfp);
1197     if (!bp->tx_buffers)
1198         goto out_err;
1199 
1200     size = DMA_TABLE_BYTES;
1201     bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1202                      &bp->rx_ring_dma, gfp);
1203     if (!bp->rx_ring) {
1204         /* Allocation may have failed due to dma_alloc_coherent
1205            insisting on use of GFP_DMA, which is more restrictive
1206            than necessary...  */
1207         struct dma_desc *rx_ring;
1208         dma_addr_t rx_ring_dma;
1209 
1210         rx_ring = kzalloc(size, gfp);
1211         if (!rx_ring)
1212             goto out_err;
1213 
1214         rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1215                          DMA_TABLE_BYTES,
1216                          DMA_BIDIRECTIONAL);
1217 
1218         if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1219             rx_ring_dma + size > DMA_BIT_MASK(30)) {
1220             kfree(rx_ring);
1221             goto out_err;
1222         }
1223 
1224         bp->rx_ring = rx_ring;
1225         bp->rx_ring_dma = rx_ring_dma;
1226         bp->flags |= B44_FLAG_RX_RING_HACK;
1227     }
1228 
1229     bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1230                      &bp->tx_ring_dma, gfp);
1231     if (!bp->tx_ring) {
1232         /* Allocation may have failed due to dma_alloc_coherent
1233            insisting on use of GFP_DMA, which is more restrictive
1234            than necessary...  */
1235         struct dma_desc *tx_ring;
1236         dma_addr_t tx_ring_dma;
1237 
1238         tx_ring = kzalloc(size, gfp);
1239         if (!tx_ring)
1240             goto out_err;
1241 
1242         tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1243                          DMA_TABLE_BYTES,
1244                          DMA_TO_DEVICE);
1245 
1246         if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1247             tx_ring_dma + size > DMA_BIT_MASK(30)) {
1248             kfree(tx_ring);
1249             goto out_err;
1250         }
1251 
1252         bp->tx_ring = tx_ring;
1253         bp->tx_ring_dma = tx_ring_dma;
1254         bp->flags |= B44_FLAG_TX_RING_HACK;
1255     }
1256 
1257     return 0;
1258 
1259 out_err:
1260     b44_free_consistent(bp);
1261     return -ENOMEM;
1262 }
1263 
1264 /* bp->lock is held. */
1265 static void b44_clear_stats(struct b44 *bp)
1266 {
1267     unsigned long reg;
1268 
1269     bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1270     for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1271         br32(bp, reg);
1272     for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1273         br32(bp, reg);
1274 }
1275 
1276 /* bp->lock is held. */
1277 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1278 {
1279     struct ssb_device *sdev = bp->sdev;
1280     bool was_enabled;
1281 
1282     was_enabled = ssb_device_is_enabled(bp->sdev);
1283 
1284     ssb_device_enable(bp->sdev, 0);
1285     ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1286 
1287     if (was_enabled) {
1288         bw32(bp, B44_RCV_LAZY, 0);
1289         bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1290         b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1291         bw32(bp, B44_DMATX_CTRL, 0);
1292         bp->tx_prod = bp->tx_cons = 0;
1293         if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1294             b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1295                      100, 0);
1296         }
1297         bw32(bp, B44_DMARX_CTRL, 0);
1298         bp->rx_prod = bp->rx_cons = 0;
1299     }
1300 
1301     b44_clear_stats(bp);
1302 
1303     /*
1304      * Don't enable the PHY if we are doing a partial reset;
1305      * we are probably going to power down.
1306      */
1307     if (reset_kind == B44_CHIP_RESET_PARTIAL)
1308         return;
1309 
1310     switch (sdev->bus->bustype) {
1311     case SSB_BUSTYPE_SSB:
1312         bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1313              (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1314                     B44_MDC_RATIO)
1315              & MDIO_CTRL_MAXF_MASK)));
1316         break;
1317     case SSB_BUSTYPE_PCI:
1318         bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1319              (0x0d & MDIO_CTRL_MAXF_MASK)));
1320         break;
1321     case SSB_BUSTYPE_PCMCIA:
1322     case SSB_BUSTYPE_SDIO:
1323         WARN_ON(1); /* A device with this bus does not exist. */
1324         break;
1325     }
1326 
1327     br32(bp, B44_MDIO_CTRL);
1328 
1329     if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1330         bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1331         br32(bp, B44_ENET_CTRL);
1332         bp->flags |= B44_FLAG_EXTERNAL_PHY;
1333     } else {
1334         u32 val = br32(bp, B44_DEVCTRL);
1335 
1336         if (val & DEVCTRL_EPR) {
1337             bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1338             br32(bp, B44_DEVCTRL);
1339             udelay(100);
1340         }
1341         bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1342     }
1343 }
1344 
1345 /* bp->lock is held. */
1346 static void b44_halt(struct b44 *bp)
1347 {
1348     b44_disable_ints(bp);
1349     /* reset PHY */
1350     b44_phy_reset(bp);
1351     /* power down PHY */
1352     netdev_info(bp->dev, "powering down PHY\n");
1353     bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1354     /* now reset the chip, but without enabling the MAC & PHY
1355      * part of it. This has to be done _after_ we shut down the PHY */
1356     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1357         b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1358     else
1359         b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1360 }
1361 
1362 /* bp->lock is held. */
1363 static void __b44_set_mac_addr(struct b44 *bp)
1364 {
1365     bw32(bp, B44_CAM_CTRL, 0);
1366     if (!(bp->dev->flags & IFF_PROMISC)) {
1367         u32 val;
1368 
1369         __b44_cam_write(bp, bp->dev->dev_addr, 0);
1370         val = br32(bp, B44_CAM_CTRL);
1371         bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1372     }
1373 }
1374 
1375 static int b44_set_mac_addr(struct net_device *dev, void *p)
1376 {
1377     struct b44 *bp = netdev_priv(dev);
1378     struct sockaddr *addr = p;
1379     u32 val;
1380 
1381     if (netif_running(dev))
1382         return -EBUSY;
1383 
1384     if (!is_valid_ether_addr(addr->sa_data))
1385         return -EINVAL;
1386 
1387     eth_hw_addr_set(dev, addr->sa_data);
1388 
1389     spin_lock_irq(&bp->lock);
1390 
1391     val = br32(bp, B44_RXCONFIG);
1392     if (!(val & RXCONFIG_CAM_ABSENT))
1393         __b44_set_mac_addr(bp);
1394 
1395     spin_unlock_irq(&bp->lock);
1396 
1397     return 0;
1398 }
1399 
1400 /* Called at device open time to get the chip ready for
1401  * packet processing.  Invoked with bp->lock held.
1402  */
1403 static void __b44_set_rx_mode(struct net_device *);
1404 static void b44_init_hw(struct b44 *bp, int reset_kind)
1405 {
1406     u32 val;
1407 
1408     b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1409     if (reset_kind == B44_FULL_RESET) {
1410         b44_phy_reset(bp);
1411         b44_setup_phy(bp);
1412     }
1413 
1414     /* Enable CRC32, set proper LED modes and power on PHY */
1415     bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1416     bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1417 
1418     /* This sets the MAC address too.  */
1419     __b44_set_rx_mode(bp->dev);
1420 
1421     /* MTU + eth header + possible VLAN tag + struct rx_header */
1422     bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1423     bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1424 
1425     bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1426     if (reset_kind == B44_PARTIAL_RESET) {
1427         bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1428                       (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1429     } else {
1430         bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1431         bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1432         bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1433                       (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1434         bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1435 
1436         bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1437         bp->rx_prod = bp->rx_pending;
1438 
1439         bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1440     }
1441 
1442     val = br32(bp, B44_ENET_CTRL);
1443     bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1444 
1445     netdev_reset_queue(bp->dev);
1446 }
1447 
1448 static int b44_open(struct net_device *dev)
1449 {
1450     struct b44 *bp = netdev_priv(dev);
1451     int err;
1452 
1453     err = b44_alloc_consistent(bp, GFP_KERNEL);
1454     if (err)
1455         goto out;
1456 
1457     napi_enable(&bp->napi);
1458 
1459     b44_init_rings(bp);
1460     b44_init_hw(bp, B44_FULL_RESET);
1461 
1462     b44_check_phy(bp);
1463 
1464     err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1465     if (unlikely(err < 0)) {
1466         napi_disable(&bp->napi);
1467         b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1468         b44_free_rings(bp);
1469         b44_free_consistent(bp);
1470         goto out;
1471     }
1472 
1473     timer_setup(&bp->timer, b44_timer, 0);
1474     bp->timer.expires = jiffies + HZ;
1475     add_timer(&bp->timer);
1476 
1477     b44_enable_ints(bp);
1478 
1479     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1480         phy_start(dev->phydev);
1481 
1482     netif_start_queue(dev);
1483 out:
1484     return err;
1485 }
1486 
1487 #ifdef CONFIG_NET_POLL_CONTROLLER
1488 /*
1489  * Polling receive - used by netconsole and other diagnostic tools
1490  * to allow network i/o with interrupts disabled.
1491  */
1492 static void b44_poll_controller(struct net_device *dev)
1493 {
1494     disable_irq(dev->irq);
1495     b44_interrupt(dev->irq, dev);
1496     enable_irq(dev->irq);
1497 }
1498 #endif
1499 
1500 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1501 {
1502     u32 i;
1503     u32 *pattern = (u32 *) pp;
1504 
1505     for (i = 0; i < bytes; i += sizeof(u32)) {
1506         bw32(bp, B44_FILT_ADDR, table_offset + i);
1507         bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1508     }
1509 }
1510 
1511 static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
1512                  int offset)
1513 {
1514     int magicsync = 6;
1515     int k, j, len = offset;
1516     int ethaddr_bytes = ETH_ALEN;
1517 
1518     memset(ppattern + offset, 0xff, magicsync);
1519     for (j = 0; j < magicsync; j++) {
1520         pmask[len >> 3] |= BIT(len & 7);
1521         len++;
1522     }
1523 
1524     for (j = 0; j < B44_MAX_PATTERNS; j++) {
1525         if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1526             ethaddr_bytes = ETH_ALEN;
1527         else
1528             ethaddr_bytes = B44_PATTERN_SIZE - len;
1529         if (ethaddr_bytes <= 0)
1530             break;
1531         for (k = 0; k < ethaddr_bytes; k++) {
1532             ppattern[offset + magicsync +
1533                 (j * ETH_ALEN) + k] = macaddr[k];
1534             pmask[len >> 3] |= BIT(len & 7);
1535             len++;
1536         }
1537     }
1538     return len - 1;
1539 }
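/* Worked example (editor's note): with offset = ETH_HLEN (14), the code
 * above writes six 0xff sync bytes at pattern bytes 14-19, then repeats
 * the MAC address while it still fits inside B44_PATTERN_SIZE (128
 * bytes), setting one pmask bit per pattern byte.  The return value is
 * the index of the last byte written, i.e. the "one less than the real
 * length" value that b44_setup_pseudo_magicp() programs into
 * B44_WKUP_LEN.
 */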
1540 
1541 /* Setup magic packet patterns in the b44 WOL
1542  * pattern matching filter.
1543  */
1544 static void b44_setup_pseudo_magicp(struct b44 *bp)
1545 {
1546 
1547     u32 val;
1548     int plen0, plen1, plen2;
1549     u8 *pwol_pattern;
1550     u8 pwol_mask[B44_PMASK_SIZE];
1551 
1552     pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1553     if (!pwol_pattern)
1554         return;
1555 
1556     /* IPv4 magic packet pattern - pattern 0. */
1557     memset(pwol_mask, 0, B44_PMASK_SIZE);
1558     plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1559                   B44_ETHIPV4UDP_HLEN);
1560 
1561     bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1562     bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1563 
1564     /* Raw Ethernet II magic packet pattern - pattern 1 */
1565     memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1566     memset(pwol_mask, 0, B44_PMASK_SIZE);
1567     plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1568                   ETH_HLEN);
1569 
1570     bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1571                B44_PATTERN_BASE + B44_PATTERN_SIZE);
1572     bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1573                B44_PMASK_BASE + B44_PMASK_SIZE);
1574 
1575     /* IPv6 magic packet pattern - pattern 2 */
1576     memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1577     memset(pwol_mask, 0, B44_PMASK_SIZE);
1578     plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1579                   B44_ETHIPV6UDP_HLEN);
1580 
1581     bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1582                B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1583     bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1584                B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1585 
1586     kfree(pwol_pattern);
1587 
1588     /* set these patterns' lengths: one less than each real length */
1589     val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1590     bw32(bp, B44_WKUP_LEN, val);
1591 
1592     /* enable wakeup pattern matching */
1593     val = br32(bp, B44_DEVCTRL);
1594     bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1595 
1596 }
1597 
1598 #ifdef CONFIG_B44_PCI
1599 static void b44_setup_wol_pci(struct b44 *bp)
1600 {
1601     u16 val;
1602 
1603     if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1604         bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1605         pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1606         pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1607     }
1608 }
1609 #else
1610 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1611 #endif /* CONFIG_B44_PCI */
1612 
1613 static void b44_setup_wol(struct b44 *bp)
1614 {
1615     u32 val;
1616 
1617     bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1618 
1619     if (bp->flags & B44_FLAG_B0_ANDLATER) {
1620 
1621         bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1622 
1623         val = bp->dev->dev_addr[2] << 24 |
1624             bp->dev->dev_addr[3] << 16 |
1625             bp->dev->dev_addr[4] << 8 |
1626             bp->dev->dev_addr[5];
1627         bw32(bp, B44_ADDR_LO, val);
1628 
1629         val = bp->dev->dev_addr[0] << 8 |
1630             bp->dev->dev_addr[1];
1631         bw32(bp, B44_ADDR_HI, val);
1632 
1633         val = br32(bp, B44_DEVCTRL);
1634         bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1635 
1636     } else {
1637         b44_setup_pseudo_magicp(bp);
1638     }
1639     b44_setup_wol_pci(bp);
1640 }
1641 
1642 static int b44_close(struct net_device *dev)
1643 {
1644     struct b44 *bp = netdev_priv(dev);
1645 
1646     netif_stop_queue(dev);
1647 
1648     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1649         phy_stop(dev->phydev);
1650 
1651     napi_disable(&bp->napi);
1652 
1653     del_timer_sync(&bp->timer);
1654 
1655     spin_lock_irq(&bp->lock);
1656 
1657     b44_halt(bp);
1658     b44_free_rings(bp);
1659     netif_carrier_off(dev);
1660 
1661     spin_unlock_irq(&bp->lock);
1662 
1663     free_irq(dev->irq, dev);
1664 
1665     if (bp->flags & B44_FLAG_WOL_ENABLE) {
1666         b44_init_hw(bp, B44_PARTIAL_RESET);
1667         b44_setup_wol(bp);
1668     }
1669 
1670     b44_free_consistent(bp);
1671 
1672     return 0;
1673 }
1674 
1675 static void b44_get_stats64(struct net_device *dev,
1676                 struct rtnl_link_stats64 *nstat)
1677 {
1678     struct b44 *bp = netdev_priv(dev);
1679     struct b44_hw_stats *hwstat = &bp->hw_stats;
1680     unsigned int start;
1681 
1682     do {
1683         start = u64_stats_fetch_begin_irq(&hwstat->syncp);
1684 
1685         /* Convert HW stats into rtnl_link_stats64 stats. */
1686         nstat->rx_packets = hwstat->rx_pkts;
1687         nstat->tx_packets = hwstat->tx_pkts;
1688         nstat->rx_bytes   = hwstat->rx_octets;
1689         nstat->tx_bytes   = hwstat->tx_octets;
1690         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1691                      hwstat->tx_oversize_pkts +
1692                      hwstat->tx_underruns +
1693                      hwstat->tx_excessive_cols +
1694                      hwstat->tx_late_cols);
1695         nstat->multicast  = hwstat->rx_multicast_pkts;
1696         nstat->collisions = hwstat->tx_total_cols;
1697 
1698         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1699                        hwstat->rx_undersize);
1700         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1701         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1702         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1703         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1704                        hwstat->rx_oversize_pkts +
1705                        hwstat->rx_missed_pkts +
1706                        hwstat->rx_crc_align_errs +
1707                        hwstat->rx_undersize +
1708                        hwstat->rx_crc_errs +
1709                        hwstat->rx_align_errs +
1710                        hwstat->rx_symbol_errs);
1711 
1712         nstat->tx_aborted_errors = hwstat->tx_underruns;
1713 #if 0
1714         /* Carrier lost counter seems to be broken for some devices */
1715         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1716 #endif
1717     } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
1718 
1719 }
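/* The loop above is the reader half of the u64_stats seqcount protocol;
 * the writer side (b44_stats_update, elsewhere in this file) is expected
 * to bracket counter updates roughly like this sketch:
 *
 *   u64_stats_update_begin(&hwstat->syncp);
 *   hwstat->rx_pkts += ...;
 *   u64_stats_update_end(&hwstat->syncp);
 *
 * A reader racing with a writer sees the sequence count change and simply
 * retries the whole snapshot.
 */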
1720 
1721 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1722 {
1723     struct netdev_hw_addr *ha;
1724     int i, num_ents;
1725 
1726     num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1727     i = 0;
1728     netdev_for_each_mc_addr(ha, dev) {
1729         if (i == num_ents)
1730             break;
1731         __b44_cam_write(bp, ha->addr, i++ + 1);
1732     }
1733     return i + 1;
1734 }
1735 
1736 static void __b44_set_rx_mode(struct net_device *dev)
1737 {
1738     struct b44 *bp = netdev_priv(dev);
1739     u32 val;
1740 
1741     val = br32(bp, B44_RXCONFIG);
1742     val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1743     if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1744         val |= RXCONFIG_PROMISC;
1745         bw32(bp, B44_RXCONFIG, val);
1746     } else {
1747         unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1748         int i = 1;
1749 
1750         __b44_set_mac_addr(bp);
1751 
1752         if ((dev->flags & IFF_ALLMULTI) ||
1753             (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1754             val |= RXCONFIG_ALLMULTI;
1755         else
1756             i = __b44_load_mcast(bp, dev);
1757 
1758         for (; i < 64; i++)
1759             __b44_cam_write(bp, zero, i);
1760 
1761         bw32(bp, B44_RXCONFIG, val);
1762         val = br32(bp, B44_CAM_CTRL);
1763         bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1764     }
1765 }
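/* Resulting CAM layout after the code above (entry 0 is presumed to be
 * the unicast address written by __b44_set_mac_addr):
 *
 *   entry 0          unicast MAC
 *   entries 1..N     up to B44_MCAST_TABLE_SIZE multicast addresses
 *   entries N+1..63  cleared with the all-zero address
 *
 * which is why __b44_load_mcast returns the first index left to clear.
 */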
1766 
1767 static void b44_set_rx_mode(struct net_device *dev)
1768 {
1769     struct b44 *bp = netdev_priv(dev);
1770 
1771     spin_lock_irq(&bp->lock);
1772     __b44_set_rx_mode(dev);
1773     spin_unlock_irq(&bp->lock);
1774 }
1775 
1776 static u32 b44_get_msglevel(struct net_device *dev)
1777 {
1778     struct b44 *bp = netdev_priv(dev);
1779     return bp->msg_enable;
1780 }
1781 
1782 static void b44_set_msglevel(struct net_device *dev, u32 value)
1783 {
1784     struct b44 *bp = netdev_priv(dev);
1785     bp->msg_enable = value;
1786 }
1787 
1788 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1789 {
1790     struct b44 *bp = netdev_priv(dev);
1791     struct ssb_bus *bus = bp->sdev->bus;
1792 
1793     strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1794     switch (bus->bustype) {
1795     case SSB_BUSTYPE_PCI:
1796         strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1797         break;
1798     case SSB_BUSTYPE_SSB:
1799         strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1800         break;
1801     case SSB_BUSTYPE_PCMCIA:
1802     case SSB_BUSTYPE_SDIO:
1803         WARN_ON(1); /* A device with this bus does not exist. */
1804         break;
1805     }
1806 }
1807 
1808 static int b44_nway_reset(struct net_device *dev)
1809 {
1810     struct b44 *bp = netdev_priv(dev);
1811     u32 bmcr;
1812     int r;
1813 
1814     spin_lock_irq(&bp->lock);
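    /* BMCR is read twice on purpose here (assumption: the first read can
     * return a stale/latched value on this PHY, the second is current). */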
1815     b44_readphy(bp, MII_BMCR, &bmcr);
1816     b44_readphy(bp, MII_BMCR, &bmcr);
1817     r = -EINVAL;
1818     if (bmcr & BMCR_ANENABLE) {
1819         b44_writephy(bp, MII_BMCR,
1820                  bmcr | BMCR_ANRESTART);
1821         r = 0;
1822     }
1823     spin_unlock_irq(&bp->lock);
1824 
1825     return r;
1826 }
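/* BMCR_ANENABLE and BMCR_ANRESTART are the standard clause-22 MII control
 * bits (register 0); restarting autonegotiation is only meaningful while
 * it is enabled, hence the -EINVAL above when BMCR_ANENABLE is clear. */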
1827 
1828 static int b44_get_link_ksettings(struct net_device *dev,
1829                   struct ethtool_link_ksettings *cmd)
1830 {
1831     struct b44 *bp = netdev_priv(dev);
1832     u32 supported, advertising;
1833 
1834     if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1835         BUG_ON(!dev->phydev);
1836         phy_ethtool_ksettings_get(dev->phydev, cmd);
1837 
1838         return 0;
1839     }
1840 
1841     supported = (SUPPORTED_Autoneg);
1842     supported |= (SUPPORTED_100baseT_Half |
1843               SUPPORTED_100baseT_Full |
1844               SUPPORTED_10baseT_Half |
1845               SUPPORTED_10baseT_Full |
1846               SUPPORTED_MII);
1847 
1848     advertising = 0;
1849     if (bp->flags & B44_FLAG_ADV_10HALF)
1850         advertising |= ADVERTISED_10baseT_Half;
1851     if (bp->flags & B44_FLAG_ADV_10FULL)
1852         advertising |= ADVERTISED_10baseT_Full;
1853     if (bp->flags & B44_FLAG_ADV_100HALF)
1854         advertising |= ADVERTISED_100baseT_Half;
1855     if (bp->flags & B44_FLAG_ADV_100FULL)
1856         advertising |= ADVERTISED_100baseT_Full;
1857     advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1858     cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1859         SPEED_100 : SPEED_10;
1860     cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1861         DUPLEX_FULL : DUPLEX_HALF;
1862     cmd->base.port = 0;
1863     cmd->base.phy_address = bp->phy_addr;
1864     cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1865         AUTONEG_DISABLE : AUTONEG_ENABLE;
1866     if (cmd->base.autoneg == AUTONEG_ENABLE)
1867         advertising |= ADVERTISED_Autoneg;
1868 
1869     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1870                         supported);
1871     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1872                         advertising);
1873 
1874     if (!netif_running(dev)) {
1875         cmd->base.speed = 0;
1876         cmd->base.duplex = 0xff;
1877     }
1878 
1879     return 0;
1880 }
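/* ethtool_convert_legacy_u32_to_link_mode() expands a legacy ADVERTISED_*
 * bitmask into the ETHTOOL_LINK_MODE_* bitmap, e.g. (illustrative):
 *
 *   __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
 *   ethtool_convert_legacy_u32_to_link_mode(modes,
 *           ADVERTISED_100baseT_Full | ADVERTISED_Autoneg);
 *   // ETHTOOL_LINK_MODE_100baseT_Full_BIT is now set in modes
 */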
1881 
1882 static int b44_set_link_ksettings(struct net_device *dev,
1883                   const struct ethtool_link_ksettings *cmd)
1884 {
1885     struct b44 *bp = netdev_priv(dev);
1886     u32 speed;
1887     int ret;
1888     u32 advertising;
1889 
1890     if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1891         BUG_ON(!dev->phydev);
1892         spin_lock_irq(&bp->lock);
1893         if (netif_running(dev))
1894             b44_setup_phy(bp);
1895 
1896         ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1897 
1898         spin_unlock_irq(&bp->lock);
1899 
1900         return ret;
1901     }
1902 
1903     speed = cmd->base.speed;
1904 
1905     ethtool_convert_link_mode_to_legacy_u32(&advertising,
1906                         cmd->link_modes.advertising);
1907 
1908     /* We do not support gigabit. */
1909     if (cmd->base.autoneg == AUTONEG_ENABLE) {
1910         if (advertising &
1911             (ADVERTISED_1000baseT_Half |
1912              ADVERTISED_1000baseT_Full))
1913             return -EINVAL;
1914     } else if ((speed != SPEED_100 &&
1915             speed != SPEED_10) ||
1916            (cmd->base.duplex != DUPLEX_HALF &&
1917             cmd->base.duplex != DUPLEX_FULL)) {
1918         return -EINVAL;
1919     }
1920 
1921     spin_lock_irq(&bp->lock);
1922 
1923     if (cmd->base.autoneg == AUTONEG_ENABLE) {
1924         bp->flags &= ~(B44_FLAG_FORCE_LINK |
1925                    B44_FLAG_100_BASE_T |
1926                    B44_FLAG_FULL_DUPLEX |
1927                    B44_FLAG_ADV_10HALF |
1928                    B44_FLAG_ADV_10FULL |
1929                    B44_FLAG_ADV_100HALF |
1930                    B44_FLAG_ADV_100FULL);
1931         if (advertising == 0) {
1932             bp->flags |= (B44_FLAG_ADV_10HALF |
1933                       B44_FLAG_ADV_10FULL |
1934                       B44_FLAG_ADV_100HALF |
1935                       B44_FLAG_ADV_100FULL);
1936         } else {
1937             if (advertising & ADVERTISED_10baseT_Half)
1938                 bp->flags |= B44_FLAG_ADV_10HALF;
1939             if (advertising & ADVERTISED_10baseT_Full)
1940                 bp->flags |= B44_FLAG_ADV_10FULL;
1941             if (advertising & ADVERTISED_100baseT_Half)
1942                 bp->flags |= B44_FLAG_ADV_100HALF;
1943             if (advertising & ADVERTISED_100baseT_Full)
1944                 bp->flags |= B44_FLAG_ADV_100FULL;
1945         }
1946     } else {
1947         bp->flags |= B44_FLAG_FORCE_LINK;
1948         bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1949         if (speed == SPEED_100)
1950             bp->flags |= B44_FLAG_100_BASE_T;
1951         if (cmd->base.duplex == DUPLEX_FULL)
1952             bp->flags |= B44_FLAG_FULL_DUPLEX;
1953     }
1954 
1955     if (netif_running(dev))
1956         b44_setup_phy(bp);
1957 
1958     spin_unlock_irq(&bp->lock);
1959 
1960     return 0;
1961 }
1962 
1963 static void b44_get_ringparam(struct net_device *dev,
1964                   struct ethtool_ringparam *ering,
1965                   struct kernel_ethtool_ringparam *kernel_ering,
1966                   struct netlink_ext_ack *extack)
1967 {
1968     struct b44 *bp = netdev_priv(dev);
1969 
1970     ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1971     ering->rx_pending = bp->rx_pending;
1972     ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1973     ering->tx_pending = bp->tx_pending;
1974 }
1975 
1976 static int b44_set_ringparam(struct net_device *dev,
1977                  struct ethtool_ringparam *ering,
1978                  struct kernel_ethtool_ringparam *kernel_ering,
1979                  struct netlink_ext_ack *extack)
1980 {
1981     struct b44 *bp = netdev_priv(dev);
1982 
1983     if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1984         (ering->rx_mini_pending != 0) ||
1985         (ering->rx_jumbo_pending != 0) ||
1986         (ering->tx_pending > B44_TX_RING_SIZE - 1))
1987         return -EINVAL;
1988 
1989     spin_lock_irq(&bp->lock);
1990 
1991     bp->rx_pending = ering->rx_pending;
1992     bp->tx_pending = ering->tx_pending;
1993 
1994     b44_halt(bp);
1995     b44_init_rings(bp);
1996     b44_init_hw(bp, B44_FULL_RESET);
1997     netif_wake_queue(bp->dev);
1998     spin_unlock_irq(&bp->lock);
1999 
2000     b44_enable_ints(bp);
2001 
2002     return 0;
2003 }
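/* User-space equivalent (sketch): "ethtool -G eth0 rx 200 tx 511" --
 * both values must stay below the 512-entry rings or the checks above
 * return -EINVAL. */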
2004 
2005 static void b44_get_pauseparam(struct net_device *dev,
2006                 struct ethtool_pauseparam *epause)
2007 {
2008     struct b44 *bp = netdev_priv(dev);
2009 
2010     epause->autoneg =
2011         (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2012     epause->rx_pause =
2013         (bp->flags & B44_FLAG_RX_PAUSE) != 0;
2014     epause->tx_pause =
2015         (bp->flags & B44_FLAG_TX_PAUSE) != 0;
2016 }
2017 
2018 static int b44_set_pauseparam(struct net_device *dev,
2019                 struct ethtool_pauseparam *epause)
2020 {
2021     struct b44 *bp = netdev_priv(dev);
2022 
2023     spin_lock_irq(&bp->lock);
2024     if (epause->autoneg)
2025         bp->flags |= B44_FLAG_PAUSE_AUTO;
2026     else
2027         bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2028     if (epause->rx_pause)
2029         bp->flags |= B44_FLAG_RX_PAUSE;
2030     else
2031         bp->flags &= ~B44_FLAG_RX_PAUSE;
2032     if (epause->tx_pause)
2033         bp->flags |= B44_FLAG_TX_PAUSE;
2034     else
2035         bp->flags &= ~B44_FLAG_TX_PAUSE;
2036     if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2037         b44_halt(bp);
2038         b44_init_rings(bp);
2039         b44_init_hw(bp, B44_FULL_RESET);
2040     } else {
2041         __b44_set_flow_ctrl(bp, bp->flags);
2042     }
2043     spin_unlock_irq(&bp->lock);
2044 
2045     b44_enable_ints(bp);
2046 
2047     return 0;
2048 }
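/* User-space equivalent (sketch): "ethtool -A eth0 autoneg on rx on tx on";
 * with autoneg on, flow control is renegotiated via a full reinit, otherwise
 * __b44_set_flow_ctrl() applies the forced settings directly. */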
2049 
2050 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2051 {
2052     switch (stringset) {
2053     case ETH_SS_STATS:
2054         memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2055         break;
2056     }
2057 }
2058 
2059 static int b44_get_sset_count(struct net_device *dev, int sset)
2060 {
2061     switch (sset) {
2062     case ETH_SS_STATS:
2063         return ARRAY_SIZE(b44_gstrings);
2064     default:
2065         return -EOPNOTSUPP;
2066     }
2067 }
2068 
2069 static void b44_get_ethtool_stats(struct net_device *dev,
2070                   struct ethtool_stats *stats, u64 *data)
2071 {
2072     struct b44 *bp = netdev_priv(dev);
2073     struct b44_hw_stats *hwstat = &bp->hw_stats;
2074     u64 *data_src, *data_dst;
2075     unsigned int start;
2076     u32 i;
2077 
2078     spin_lock_irq(&bp->lock);
2079     b44_stats_update(bp);
2080     spin_unlock_irq(&bp->lock);
2081 
2082     do {
2083         data_src = &hwstat->tx_good_octets;
2084         data_dst = data;
2085         start = u64_stats_fetch_begin_irq(&hwstat->syncp);
2086 
2087         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2088             *data_dst++ = *data_src++;
2089 
2090     } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
2091 }
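/* The copy above assumes struct b44_hw_stats packs one u64 per entry of
 * b44_gstrings, in the same order, starting at tx_good_octets -- roughly
 * (see b44.h for the authoritative layout):
 *
 *   struct b44_hw_stats {
 *       u64 tx_good_octets;    // matches b44_gstrings[0]
 *       u64 tx_good_pkts;      // matches b44_gstrings[1]
 *       ...
 *       struct u64_stats_sync syncp;
 *   };
 */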
2092 
2093 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2094 {
2095     struct b44 *bp = netdev_priv(dev);
2096 
2097     wol->supported = WAKE_MAGIC;
2098     if (bp->flags & B44_FLAG_WOL_ENABLE)
2099         wol->wolopts = WAKE_MAGIC;
2100     else
2101         wol->wolopts = 0;
2102     memset(&wol->sopass, 0, sizeof(wol->sopass));
2103 }
2104 
2105 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2106 {
2107     struct b44 *bp = netdev_priv(dev);
2108 
2109     spin_lock_irq(&bp->lock);
2110     if (wol->wolopts & WAKE_MAGIC)
2111         bp->flags |= B44_FLAG_WOL_ENABLE;
2112     else
2113         bp->flags &= ~B44_FLAG_WOL_ENABLE;
2114     spin_unlock_irq(&bp->lock);
2115 
2116     device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2117     return 0;
2118 }
2119 
2120 static const struct ethtool_ops b44_ethtool_ops = {
2121     .get_drvinfo        = b44_get_drvinfo,
2122     .nway_reset     = b44_nway_reset,
2123     .get_link       = ethtool_op_get_link,
2124     .get_wol        = b44_get_wol,
2125     .set_wol        = b44_set_wol,
2126     .get_ringparam      = b44_get_ringparam,
2127     .set_ringparam      = b44_set_ringparam,
2128     .get_pauseparam     = b44_get_pauseparam,
2129     .set_pauseparam     = b44_set_pauseparam,
2130     .get_msglevel       = b44_get_msglevel,
2131     .set_msglevel       = b44_set_msglevel,
2132     .get_strings        = b44_get_strings,
2133     .get_sset_count     = b44_get_sset_count,
2134     .get_ethtool_stats  = b44_get_ethtool_stats,
2135     .get_link_ksettings = b44_get_link_ksettings,
2136     .set_link_ksettings = b44_set_link_ksettings,
2137 };
2138 
2139 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2140 {
2141     struct b44 *bp = netdev_priv(dev);
2142     int err = -EINVAL;
2143 
2144     if (!netif_running(dev))
2145         goto out;
2146 
2147     spin_lock_irq(&bp->lock);
2148     if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2149         BUG_ON(!dev->phydev);
2150         err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2151     } else {
2152         err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2153     }
2154     spin_unlock_irq(&bp->lock);
2155 out:
2156     return err;
2157 }
2158 
2159 static int b44_get_invariants(struct b44 *bp)
2160 {
2161     struct ssb_device *sdev = bp->sdev;
2162     int err = 0;
2163     u8 *addr;
2164 
2165     bp->dma_offset = ssb_dma_translation(sdev);
2166 
2167     if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2168         instance > 1) {
2169         addr = sdev->bus->sprom.et1mac;
2170         bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2171     } else {
2172         addr = sdev->bus->sprom.et0mac;
2173         bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2174     }
2175     /* Some ROMs have buggy PHY addresses with the high
2176      * bits set (sign extension?). Truncate them to a
2177      * valid PHY address. */
2178     bp->phy_addr &= 0x1F;
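    /* e.g. a sign-extended SPROM value of 0xFFFE truncates to PHY address
     * 0x1E; valid MII addresses are 5-bit (0..31). */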
2179 
2180     eth_hw_addr_set(bp->dev, addr);
2181 
2182     if (!is_valid_ether_addr(bp->dev->dev_addr)) {
2183         pr_err("Invalid MAC address found in EEPROM\n");
2184         return -EINVAL;
2185     }
2186 
2187     bp->imask = IMASK_DEF;
2188 
2189     /* XXX - really required?
2190        bp->flags |= B44_FLAG_BUGGY_TXPTR;
2191     */
2192 
2193     if (bp->sdev->id.revision >= 7)
2194         bp->flags |= B44_FLAG_B0_ANDLATER;
2195 
2196     return err;
2197 }
2198 
2199 static const struct net_device_ops b44_netdev_ops = {
2200     .ndo_open       = b44_open,
2201     .ndo_stop       = b44_close,
2202     .ndo_start_xmit     = b44_start_xmit,
2203     .ndo_get_stats64    = b44_get_stats64,
2204     .ndo_set_rx_mode    = b44_set_rx_mode,
2205     .ndo_set_mac_address    = b44_set_mac_addr,
2206     .ndo_validate_addr  = eth_validate_addr,
2207     .ndo_eth_ioctl      = b44_ioctl,
2208     .ndo_tx_timeout     = b44_tx_timeout,
2209     .ndo_change_mtu     = b44_change_mtu,
2210 #ifdef CONFIG_NET_POLL_CONTROLLER
2211     .ndo_poll_controller    = b44_poll_controller,
2212 #endif
2213 };
2214 
2215 static void b44_adjust_link(struct net_device *dev)
2216 {
2217     struct b44 *bp = netdev_priv(dev);
2218     struct phy_device *phydev = dev->phydev;
2219     bool status_changed = false;
2220 
2221     BUG_ON(!phydev);
2222 
2223     if (bp->old_link != phydev->link) {
2224         status_changed = true;
2225         bp->old_link = phydev->link;
2226     }
2227 
2228     /* reflect duplex change */
2229     if (phydev->link) {
2230         if ((phydev->duplex == DUPLEX_HALF) &&
2231             (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2232             status_changed = true;
2233             bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2234         } else if ((phydev->duplex == DUPLEX_FULL) &&
2235                !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2236             status_changed = true;
2237             bp->flags |= B44_FLAG_FULL_DUPLEX;
2238         }
2239     }
2240 
2241     if (status_changed) {
2242         u32 val = br32(bp, B44_TX_CTRL);
2243         if (bp->flags & B44_FLAG_FULL_DUPLEX)
2244             val |= TX_CTRL_DUPLEX;
2245         else
2246             val &= ~TX_CTRL_DUPLEX;
2247         bw32(bp, B44_TX_CTRL, val);
2248         phy_print_status(phydev);
2249     }
2250 }
2251 
2252 static int b44_register_phy_one(struct b44 *bp)
2253 {
2254     __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2255     struct mii_bus *mii_bus;
2256     struct ssb_device *sdev = bp->sdev;
2257     struct phy_device *phydev;
2258     char bus_id[MII_BUS_ID_SIZE + 3];
2259     struct ssb_sprom *sprom = &sdev->bus->sprom;
2260     int err;
2261 
2262     mii_bus = mdiobus_alloc();
2263     if (!mii_bus) {
2264         dev_err(sdev->dev, "mdiobus_alloc() failed\n");
2265         err = -ENOMEM;
2266         goto err_out;
2267     }
2268 
2269     mii_bus->priv = bp;
2270     mii_bus->read = b44_mdio_read_phylib;
2271     mii_bus->write = b44_mdio_write_phylib;
2272     mii_bus->name = "b44_eth_mii";
2273     mii_bus->parent = sdev->dev;
2274     mii_bus->phy_mask = ~(1 << bp->phy_addr);
2275     snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
2276 
2277     bp->mii_bus = mii_bus;
2278 
2279     err = mdiobus_register(mii_bus);
2280     if (err) {
2281         dev_err(sdev->dev, "failed to register MII bus\n");
2282         goto err_out_mdiobus;
2283     }
2284 
2285     if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2286         (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2287 
2288         dev_info(sdev->dev,
2289              "could not find PHY at %i, use fixed one\n",
2290              bp->phy_addr);
2291 
2292         bp->phy_addr = 0;
2293         snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2294              bp->phy_addr);
2295     } else {
2296         snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2297              bp->phy_addr);
2298     }
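    /* PHY_ID_FMT is "%s:%02x", so e.g. bus id "0" and PHY address 30 give
     * the string "0:1e" handed to phy_connect() below (illustrative
     * values). */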
2299 
2300     phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2301                  PHY_INTERFACE_MODE_MII);
2302     if (IS_ERR(phydev)) {
2303         dev_err(sdev->dev, "could not attach PHY at %i\n",
2304             bp->phy_addr);
2305         err = PTR_ERR(phydev);
2306         goto err_out_mdiobus_unregister;
2307     }
2308 
2309     /* mask with MAC supported features */
2310     linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
2311     linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
2312     linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
2313     linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
2314     linkmode_and(phydev->supported, phydev->supported, mask);
2315     linkmode_copy(phydev->advertising, phydev->supported);
2316 
2317     bp->old_link = 0;
2318     bp->phy_addr = phydev->mdio.addr;
2319 
2320     phy_attached_info(phydev);
2321 
2322     return 0;
2323 
2324 err_out_mdiobus_unregister:
2325     mdiobus_unregister(mii_bus);
2326 
2327 err_out_mdiobus:
2328     mdiobus_free(mii_bus);
2329 
2330 err_out:
2331     return err;
2332 }
2333 
2334 static void b44_unregister_phy_one(struct b44 *bp)
2335 {
2336     struct net_device *dev = bp->dev;
2337     struct mii_bus *mii_bus = bp->mii_bus;
2338 
2339     phy_disconnect(dev->phydev);
2340     mdiobus_unregister(mii_bus);
2341     mdiobus_free(mii_bus);
2342 }
2343 
2344 static int b44_init_one(struct ssb_device *sdev,
2345             const struct ssb_device_id *ent)
2346 {
2347     struct net_device *dev;
2348     struct b44 *bp;
2349     int err;
2350 
2351     instance++;
2352 
2353     dev = alloc_etherdev(sizeof(*bp));
2354     if (!dev) {
2355         err = -ENOMEM;
2356         goto out;
2357     }
2358 
2359     SET_NETDEV_DEV(dev, sdev->dev);
2360 
2361     /* No interesting netdevice features in this card... */
2362     dev->features |= 0;
2363 
2364     bp = netdev_priv(dev);
2365     bp->sdev = sdev;
2366     bp->dev = dev;
2367     bp->force_copybreak = 0;
2368 
2369     bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2370 
2371     spin_lock_init(&bp->lock);
2372     u64_stats_init(&bp->hw_stats.syncp);
2373 
2374     bp->rx_pending = B44_DEF_RX_RING_PENDING;
2375     bp->tx_pending = B44_DEF_TX_RING_PENDING;
2376 
2377     dev->netdev_ops = &b44_netdev_ops;
2378     netif_napi_add(dev, &bp->napi, b44_poll, 64);
2379     dev->watchdog_timeo = B44_TX_TIMEOUT;
2380     dev->min_mtu = B44_MIN_MTU;
2381     dev->max_mtu = B44_MAX_MTU;
2382     dev->irq = sdev->irq;
2383     dev->ethtool_ops = &b44_ethtool_ops;
2384 
2385     err = ssb_bus_powerup(sdev->bus, 0);
2386     if (err) {
2387         dev_err(sdev->dev,
2388             "Failed to powerup the bus\n");
2389         goto err_out_free_dev;
2390     }
2391 
2392     err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
2393     if (err) {
2394         dev_err(sdev->dev,
2395             "Required 30BIT DMA mask unsupported by the system\n");
2396         goto err_out_powerdown;
2397     }
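    /* DMA_BIT_MASK(30) limits DMA to the low 1 GiB: the core only drives
     * 30 address bits, the rest being supplied by the SSB translation
     * recorded in bp->dma_offset (set in b44_get_invariants() below). */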
2398 
2399     err = b44_get_invariants(bp);
2400     if (err) {
2401         dev_err(sdev->dev,
2402             "Problem fetching invariants of chip, aborting\n");
2403         goto err_out_powerdown;
2404     }
2405 
2406     if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2407         dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2408         err = -ENODEV;
2409         goto err_out_powerdown;
2410     }
2411 
2412     bp->mii_if.dev = dev;
2413     bp->mii_if.mdio_read = b44_mdio_read_mii;
2414     bp->mii_if.mdio_write = b44_mdio_write_mii;
2415     bp->mii_if.phy_id = bp->phy_addr;
2416     bp->mii_if.phy_id_mask = 0x1f;
2417     bp->mii_if.reg_num_mask = 0x1f;
2418 
2419     /* By default, advertise all speed/duplex settings. */
2420     bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2421               B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2422 
2423     /* By default, auto-negotiate PAUSE. */
2424     bp->flags |= B44_FLAG_PAUSE_AUTO;
2425 
2426     err = register_netdev(dev);
2427     if (err) {
2428         dev_err(sdev->dev, "Cannot register net device, aborting\n");
2429         goto err_out_powerdown;
2430     }
2431 
2432     netif_carrier_off(dev);
2433 
2434     ssb_set_drvdata(sdev, dev);
2435 
2436     /* Chip reset provides power to the b44 MAC & PCI cores, which
2437      * is necessary for MAC register access.
2438      */
2439     b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2440 
2441     /* do a phy reset to test if there is an active phy */
2442     err = b44_phy_reset(bp);
2443     if (err < 0) {
2444         dev_err(sdev->dev, "phy reset failed\n");
2445         goto err_out_unregister_netdev;
2446     }
2447 
2448     if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2449         err = b44_register_phy_one(bp);
2450         if (err) {
2451             dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2452             goto err_out_unregister_netdev;
2453         }
2454     }
2455 
2456     device_set_wakeup_capable(sdev->dev, true);
2457     netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2458 
2459     return 0;
2460 
2461 err_out_unregister_netdev:
2462     unregister_netdev(dev);
2463 err_out_powerdown:
2464     ssb_bus_may_powerdown(sdev->bus);
2465 
2466 err_out_free_dev:
2467     netif_napi_del(&bp->napi);
2468     free_netdev(dev);
2469 
2470 out:
2471     return err;
2472 }
2473 
2474 static void b44_remove_one(struct ssb_device *sdev)
2475 {
2476     struct net_device *dev = ssb_get_drvdata(sdev);
2477     struct b44 *bp = netdev_priv(dev);
2478 
2479     unregister_netdev(dev);
2480     if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2481         b44_unregister_phy_one(bp);
2482     ssb_device_disable(sdev, 0);
2483     ssb_bus_may_powerdown(sdev->bus);
2484     netif_napi_del(&bp->napi);
2485     free_netdev(dev);
2486     ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2487     ssb_set_drvdata(sdev, NULL);
2488 }
2489 
2490 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2491 {
2492     struct net_device *dev = ssb_get_drvdata(sdev);
2493     struct b44 *bp = netdev_priv(dev);
2494 
2495     if (!netif_running(dev))
2496         return 0;
2497 
2498     del_timer_sync(&bp->timer);
2499 
2500     spin_lock_irq(&bp->lock);
2501 
2502     b44_halt(bp);
2503     netif_carrier_off(bp->dev);
2504     netif_device_detach(bp->dev);
2505     b44_free_rings(bp);
2506 
2507     spin_unlock_irq(&bp->lock);
2508 
2509     free_irq(dev->irq, dev);
2510     if (bp->flags & B44_FLAG_WOL_ENABLE) {
2511         b44_init_hw(bp, B44_PARTIAL_RESET);
2512         b44_setup_wol(bp);
2513     }
2514 
2515     ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2516     return 0;
2517 }
2518 
2519 static int b44_resume(struct ssb_device *sdev)
2520 {
2521     struct net_device *dev = ssb_get_drvdata(sdev);
2522     struct b44 *bp = netdev_priv(dev);
2523     int rc = 0;
2524 
2525     rc = ssb_bus_powerup(sdev->bus, 0);
2526     if (rc) {
2527         dev_err(sdev->dev,
2528             "Failed to powerup the bus\n");
2529         return rc;
2530     }
2531 
2532     if (!netif_running(dev))
2533         return 0;
2534 
2535     spin_lock_irq(&bp->lock);
2536     b44_init_rings(bp);
2537     b44_init_hw(bp, B44_FULL_RESET);
2538     spin_unlock_irq(&bp->lock);
2539 
2540     /*
2541      * As a shared interrupt, the handler can be called immediately. To be
2542      * able to check the interrupt status the hardware must already be
2543      * powered back on (b44_init_hw).
2544      */
2545     rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2546     if (rc) {
2547         netdev_err(dev, "request_irq failed\n");
2548         spin_lock_irq(&bp->lock);
2549         b44_halt(bp);
2550         b44_free_rings(bp);
2551         spin_unlock_irq(&bp->lock);
2552         return rc;
2553     }
2554 
2555     netif_device_attach(bp->dev);
2556 
2557     b44_enable_ints(bp);
2558     netif_wake_queue(dev);
2559 
2560     mod_timer(&bp->timer, jiffies + 1);
2561 
2562     return 0;
2563 }
2564 
2565 static struct ssb_driver b44_ssb_driver = {
2566     .name       = DRV_MODULE_NAME,
2567     .id_table   = b44_ssb_tbl,
2568     .probe      = b44_init_one,
2569     .remove     = b44_remove_one,
2570     .suspend    = b44_suspend,
2571     .resume     = b44_resume,
2572 };
2573 
2574 static inline int __init b44_pci_init(void)
2575 {
2576     int err = 0;
2577 #ifdef CONFIG_B44_PCI
2578     err = ssb_pcihost_register(&b44_pci_driver);
2579 #endif
2580     return err;
2581 }
2582 
2583 static inline void b44_pci_exit(void)
2584 {
2585 #ifdef CONFIG_B44_PCI
2586     ssb_pcihost_unregister(&b44_pci_driver);
2587 #endif
2588 }
2589 
2590 static int __init b44_init(void)
2591 {
2592     unsigned int dma_desc_align_size = dma_get_cache_alignment();
2593     int err;
2594 
2595     /* Set up parameters for syncing RX/TX DMA descriptors */
2596     dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
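    /* e.g. 32-byte cache lines with 8-byte descriptors sync 32 bytes at a
     * time, avoiding partial-cacheline flushes (illustrative sizes; struct
     * dma_desc is defined in b44.h). */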
2597 
2598     err = b44_pci_init();
2599     if (err)
2600         return err;
2601     err = ssb_driver_register(&b44_ssb_driver);
2602     if (err)
2603         b44_pci_exit();
2604     return err;
2605 }
2606 
2607 static void __exit b44_cleanup(void)
2608 {
2609     ssb_driver_unregister(&b44_ssb_driver);
2610     b44_pci_exit();
2611 }
2612 
2613 module_init(b44_init);
2614 module_exit(b44_cleanup);
2615