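/* sungem.c: Sun GEM gigabit ethernet driver.
 *
 * Supports the Sun GEM/RIO GEM and the Apple GMAC variants listed in
 * gem_pci_tbl below.
 */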
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

#define STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
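	/* These variants differ from the original GEM mainly in their
	 * tx/rx FIFO sizes; see gem_check_invariants() for the
	 * per-chip expectations.
	 */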
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
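	/* Enable all interrupts but TXDONE */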
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
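	/* Disable all interrupts, including TXDONE */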
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK);
}

static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif
}

static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif
}

static inline void gem_netif_stop(struct gem *gp)
{
	netif_trans_update(gp->dev);
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}

static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	return 0;
}

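/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */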
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		int pci_errs;

		pci_errs = pci_status_get_and_clear_errors(gp->pdev);
		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
		if (pci_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	return 1;
}

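/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */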
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}

static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
				       DMA_TO_DEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

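	/* Make the tx_old update visible to gem_start_xmit() before
	 * checking netif_queue_stopped(); pairs with the smp_mb()
	 * in gem_start_xmit().
	 */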
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}

static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	dma_wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}
	return skb;
}

static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

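		/* When writing back an RX descriptor, the chip may update
		 * the status word and the buffer pointer in separate
		 * transactions.  Re-read the completion register before
		 * trusting a descriptor at the reported boundary so we do
		 * not process a half-written entry.
		 */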
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				dev->stats.rx_crc_errors++;

		drop_it:
			dev->stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_page(&gp->pdev->dev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
			gp->rx_skbs[entry] = new_skb;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       DMA_FROM_DEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);

			skb = copy_skb;
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			__sum16 csum;

			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
			skb->csum = csum_unfold(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		skb->protocol = eth_type_trans(skb, gp->dev);

		napi_gro_receive(&gp->napi, skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

	return work_done;
}

static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	int work_done;

	work_done = 0;
	do {
		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			int reset;

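			/* We run the abnormal interrupt handling code with
			 * the Tx lock. It only resets the Rx portion of the
			 * chip, but we need to guard it against DMA being
			 * restarted by the link poll timer.
			 */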
			__netif_tx_lock(txq, smp_processor_id());
			reset = gem_abnormal_irq(dev, gp, gp->status);
			__netif_tx_unlock(txq);
			if (reset) {
				gem_schedule_reset(gp);
				napi_complete(napi);
				return work_done;
			}
		}

		gem_tx(dev, gp, gp->status);

		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	napi_complete_done(napi, work_done);
	gem_enable_ints(gp);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (unlikely(gem_status == 0)) {
			napi_enable(&gp->napi);
			return IRQ_NONE;
		}
		if (netif_msg_intr(gp))
			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
				gp->dev->name, gem_status);

		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

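	/* If NAPI polling was already disabled when the interrupt
	 * arrived, we may return IRQ_HANDLED where IRQ_NONE would be
	 * more accurate. No big deal.
	 */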
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	disable_irq(gp->pdev->irq);
	gem_interrupt(gp->pdev->irq, dev);
	enable_irq(gp->pdev->irq);
}
#endif

static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gem *gp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + TXDMA_CFG),
		   readl(gp->regs + MAC_TXSTAT),
		   readl(gp->regs + MAC_TXCFG));
	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + RXDMA_CFG),
		   readl(gp->regs + MAC_RXSTAT),
		   readl(gp->regs + MAC_RXCFG));

	gem_schedule_reset(gp);
}

static __inline__ int gem_intme(int entry)
{
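	/* Request a TX-done interrupt roughly every half ring of descriptors. */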
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = dma_map_page(&gp->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, DMA_TO_DEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		dma_wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

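		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */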
		first_len = skb_headlen(skb);
		first_mapping = dma_map_page(&gp->pdev->dev,
					     virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, DMA_TO_DEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = skb_frag_size(this_frag);
			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
						   0, len, DMA_TO_DEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			dma_wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		dma_wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

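		/* netif_stop_queue() must be done before checking
		 * TX_BUFFS_AVAIL() below, because in gem_tx() we update
		 * tx_old before checking netif_queue_stopped().
		 */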
		smp_mb();
		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
			dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

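	/* Make sure serialink loopback is off.  The meaning of this bit
	 * is logically inverted based upon whether you are in Serialink
	 * or SERDES mode.
	 */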
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	writel(0xffffffff, gp->regs + GREG_IMASK);

	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		netdev_err(gp->dev, "SW reset is ghetto\n");

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

static void gem_start_dma(struct gem *gp)
{
	u32 val;

	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
}

static void gem_begin_auto_negotiation(struct gem *gp,
				       const struct ethtool_link_ksettings *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;
	u32 advertising;

	if (ep)
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ep->link_modes.advertising);

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	if (!ep)
		goto start_aneg;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		advertise = advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->base.speed;
		duplex = ep->base.duplex;
	}

start_aneg:
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

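/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */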
static int gem_set_link_modes(struct gem *gp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
	int full_duplex, speed, pause;
	u32 val;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, (full_duplex ? "full" : "half"));

	__netif_tx_lock(txq, smp_processor_id());

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex)
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	__netif_tx_unlock(txq);

	if (netif_msg_link(gp)) {
		if (pause) {
			netdev_info(gp->dev,
				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    gp->rx_fifo_sz,
				    gp->rx_pause_off,
				    gp->rx_pause_on);
		} else {
			netdev_info(gp->dev, "Pause is disabled\n");
		}
	}

	return 0;
}

static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		netif_info(gp, link, gp->dev,
			   "Autoneg failed again, keeping forced mode\n");
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
						   gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");

		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
						   DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
							   DUPLEX_HALF);
			gp->timer_ticks = 5;
			netif_info(gp, link, gp->dev,
				   "switching to forced 10bt\n");
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}

static void gem_link_timer(struct timer_list *t)
{
	struct gem *gp = from_timer(gp, t, link_timer);
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			dma_unmap_page(&gp->pdev->dev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		dma_wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				dma_unmap_page(&gp->pdev->dev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, DMA_TO_DEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = dma_map_page(&gp->pdev->dev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					DMA_FROM_DEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		dma_wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		dma_wmb();
		txd->buffer = 0;
	}
	wmb();
}

static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	gem_begin_auto_negotiation(gp, NULL);
}

static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}

static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(gp->dev) > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct netdev_hw_addr *ha;
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, gp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}

static void gem_init_mac(struct gem *gp)
{
	const unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	writel(0xffffffff, gp->regs + MAC_MCMASK);

	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}

static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

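	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation, which
	 * is what we do for 10/100-only GEMs whose FIFOs are too small
	 * to see real gains from PAUSE.
	 */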
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}

	cfg = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT | MIF_CFG_POLL | MIF_CFG_BBMODE | MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}

static void gem_reinit_chip(struct gem *gp)
{
	gem_reset(gp);

	gem_disable_ints(gp);

	gem_init_rings(gp);

	gem_init_pause_thresholds(gp);

	gem_init_dma(gp);
	gem_init_mac(gp);
}

static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	msleep(10);

	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		const unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);

		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}

static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	pci_set_master(gp->pdev);

	gem_reinit_chip(gp);

	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	netif_device_attach(dev);

	gem_netif_start(gp);

	gem_init_phy(gp);

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	gem_netif_stop(gp);

	gem_disable_ints(gp);

	del_timer_sync(&gp->link_timer);

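	/* We cannot cancel the reset task while holding the rtnl lock,
	 * that could deadlock against the task taking it.  This is fine:
	 * the reset task is synchronized against us via rtnl and does
	 * nothing if the device is down or suspended.  Clearing
	 * reset_task_pending avoids a spurious reset if we resume before
	 * the task ever runs.
	 */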
	gp->reset_task_pending = 0;

	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	gem_clean_rings(gp);

	free_irq(gp->pdev->irq, (void *) dev);

	gem_stop_phy(gp, wol);
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	rtnl_lock();

	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	del_timer_sync(&gp->link_timer);

	gem_netif_stop(gp);

	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	gem_netif_start(gp);

	gp->reset_task_pending = 0;

	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	if (netif_device_present(dev)) {
		gem_get_cell(gp);

		rc = pci_enable_device(gp->pdev);
		if (rc) {
			netdev_err(dev, "Failed to enable chip on PCI bus !\n");

			gem_put_cell(gp);
			return -ENXIO;
		}
		return gem_do_start(dev);
	}

	return 0;
}
2298
2299 static int gem_close(struct net_device *dev)
2300 {
2301 struct gem *gp = netdev_priv(dev);
2302
2303 if (netif_device_present(dev)) {
2304 gem_do_stop(dev, 0);
2305
2306
2307 pci_disable_device(gp->pdev);
2308
2309
2310 if (!gp->asleep_wol)
2311 gem_put_cell(gp);
2312 }
2313 return 0;
2314 }
2315
2316 static int __maybe_unused gem_suspend(struct device *dev_d)
2317 {
2318 struct net_device *dev = dev_get_drvdata(dev_d);
2319 struct gem *gp = netdev_priv(dev);
2320
2321
2322
2323
	rtnl_lock();

	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");
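
	/* Tell the network stack we're gone; gem_do_stop() below will
	 * synchronize with TX, stop NAPI, etc.
	 */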
	netif_device_detach(dev);
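
	/* Switch the chip off, remembering the WOL setting */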
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	if (!gp->asleep_wol)
		gem_put_cell(gp);

	rtnl_unlock();

	return 0;
}

static int __maybe_unused gem_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct gem *gp = netdev_priv(dev);

	rtnl_lock();

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	gem_get_cell(gp);

	gem_do_start(dev);
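
	/* If WOL was enabled, the cell clock was never turned off while
	 * asleep, so the get/put count is now unbalanced; fix that here.
	 */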
	if (gp->asleep_wol)
		gem_put_cell(gp);

	rtnl_unlock();

	return 0;
}

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
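
	/* This may be called while the PM is in progress or while the
	 * reset task is running, so don't poke at the registers in
	 * either case.
	 *
	 * TODO: move stats collection elsewhere (the link timer?) and
	 * make this a nop to avoid these synchronization issues.
	 */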
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
bail:
	return &dev->stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	const unsigned char *e = &dev->dev_addr[0];
	struct gem *gp = netdev_priv(dev);

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, macaddr->sa_data);
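
	/* If the interface is down or the chip is detached, the new
	 * address is picked up the next time the chip is initialized.
	 */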
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;
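
	/* Better safe than sorry: don't touch the chip while a reset is
	 * pending or the cell is powered off.
	 */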
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}
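
/* Jumbo-grams don't seem to work :-( */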
#define GEM_MIN_MTU	ETH_MIN_MTU
#if 1
#define GEM_MAX_MTU	ETH_DATA_LEN
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	dev->mtu = new_mtu;
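
	/* If the interface is down or detached, the new MTU is picked up
	 * the next time the chip is initialized.
	 */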
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

static int gem_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 supported, advertising;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			supported = gp->phy_mii.def->features;
		else
			supported = (SUPPORTED_10baseT_Half |
				     SUPPORTED_10baseT_Full);

		cmd->base.port = PORT_MII;
		cmd->base.phy_address = 0;
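
		/* Return current PHY settings */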
		cmd->base.autoneg = gp->want_autoneg;
		cmd->base.speed = gp->phy_mii.speed;
		cmd->base.duplex = gp->phy_mii.duplex;
		advertising = gp->phy_mii.advertising;
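
		/* If we started with a forced mode, we don't have a default
		 * advertise set; return something sensible so userland can
		 * re-enable autoneg properly.
		 */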
		if (advertising == 0)
			advertising = supported;
	} else {
		supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		advertising = supported;
		cmd->base.speed = 0;
		cmd->base.duplex = 0;
		cmd->base.port = 0;
		cmd->base.phy_address = 0;
		cmd->base.autoneg = 0;
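
		/* serdes usually means a fibre connector with mostly fixed
		 * settings
		 */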
		if (gp->phy_type == phy_serdes) {
			cmd->base.port = PORT_FIBRE;
			supported = (SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				     SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			advertising = supported;
			if (gp->lstate == link_up)
				cmd->base.speed = SPEED_1000;
			cmd->base.duplex = DUPLEX_FULL;
			cmd->base.autoneg = 1;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int gem_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
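
	/* Verify the settings we care about. */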
	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_ENABLE &&
	    advertising == 0)
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;

	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}
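
/* Only magic-packet wake-up is implemented; more modes (e.g. WAKE_UCAST,
 * WAKE_MCAST, WAKE_BCAST) could be added once the chip programming for
 * them is understood. From userland, magic-packet wake-up would typically
 * be enabled with something like "ethtool -s eth0 wol g" (assuming the
 * interface is named eth0).
 */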
#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
	.get_link_ksettings	= gem_get_link_ksettings,
	.set_link_ksettings	= gem_set_link_ksettings,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;
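
	/* For SIOCGMIIREG and SIOCSMIIREG the core already checks
	 * netif_device_present() and holds the rtnl lock for us, so
	 * there is nothing to worry about here. Userland tools such as
	 * mii-tool typically reach this code through these ioctls.
	 */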
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = gp->mii_phy_addr;
		fallthrough;

	case SIOCGMIIREG:
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
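
/* Fetch the MAC address from the vital product data of the PCI ROM:
 * scan for a VPD read-only block (tag 0x90, length 9) carrying an
 * "NA" (network address) keyword of length 6.
 */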
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}
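
	/* Fall back to a Sun MAC prefix (08:00:20) followed by three
	 * random bytes.
	 */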
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif

static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	eth_hw_addr_set(dev, addr);
#else
	u8 addr[ETH_ALEN];

	get_gem_mac_nonobp(gp->pdev, addr);
	eth_hw_addr_set(gp->dev, addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		cancel_work_sync(&gp->reset_task);

		dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
				  gp->init_block, gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_eth_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= gem_poll_controller,
#endif
};

static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);
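
	/* Apple gmac note: during probe, the chip is powered up by the
	 * arch code to allow the code below to work (and to let the chip
	 * be probed on the config space). It won't stay powered up until
	 * the interface is brought up, however, so we can't rely on
	 * register configuration done at this point.
	 */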
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);
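
	/* Configure DMA attributes.
	 *
	 * The GEM documentation states that 64-bit DMA addressing is fully
	 * supported and should work just fine. However, the front end for
	 * RIO-based GEMs is different and only supports 32-bit addressing;
	 * for now the various PPC GEMs are assumed to be 32-bit only as
	 * well.
	 */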
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	timer_setup(&gp->link_timer, gem_link_timer, 0);

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	gem_get_cell(gp);

	gem_reset(gp);

	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _sungem_phy_read;
	gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif

	gp->want_autoneg = 1;

	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}
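
	/* dma_alloc_coherent() guarantees the returned buffer is at least
	 * PAGE_SIZE aligned.
	 */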
	gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
					    &gp->gblock_dvma, GFP_KERNEL);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	err = gem_get_device_address(gp);
	if (err)
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	pci_set_drvdata(pdev, dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->features = dev->hw_features;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	dev->min_mtu = GEM_MIN_MTU;
	dev->max_mtu = GEM_MAX_MTU;

	if (register_netdev(dev)) {
		pr_err("Cannot register net device, aborting\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}
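
	/* Undo the gem_get_cell() done above, taking the rtnl lock as the
	 * open/close and PM paths expect; the cell stays powered off until
	 * the interface is opened.
	 */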
	rtnl_lock();
	gem_put_cell(gp);
	rtnl_unlock();

	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
		    dev->dev_addr);
	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume);

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
	.driver.pm	= &gem_pm_ops,
};

module_pci_driver(gem_driver);