0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/kernel.h>
0012 #include <linux/module.h>
0013 #include <linux/moduleparam.h>
0014 #include <linux/string.h>
0015 #include <linux/timer.h>
0016 #include <linux/errno.h>
0017 #include <linux/ioport.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/pci.h>
0020 #include <linux/netdevice.h>
0021 #include <linux/etherdevice.h>
0022 #include <linux/skbuff.h>
0023 #include <linux/delay.h>
0024 #include <linux/mii.h>
0025 #include <linux/ethtool.h>
0026 #include <linux/crc32.h>
0027 #include <linux/spinlock.h>
0028 #include <linux/bitops.h>
0029 #include <linux/io.h>
0030 #include <linux/irq.h>
0031 #include <linux/uaccess.h>
0032 #include <linux/phy.h>
0033
0034 #include <asm/processor.h>
0035
#define DRV_NAME "r6040"
#define DRV_VERSION "0.29"
#define DRV_RELDATE "04Jul2016"

/* Transmit watchdog timeout: 6 seconds, expressed in jiffies */
#define TX_TIMEOUT (6000 * HZ / 1000)

/* Size of the MAC register window mapped from the PCI BAR */
#define R6040_IO_SIZE 256

/* Number of MACs this chip family can carry */
#define MAX_MAC 2

/* MAC register offsets and bit definitions (see the RDC R6040 datasheet) */
#define MCR0 0x00		/* MAC control register 0 */
#define MCR0_RCVEN 0x0002	/* receive enable */
#define MCR0_PROMISC 0x0020	/* promiscuous mode */
#define MCR0_HASH_EN 0x0100	/* enable multicast hash-table filtering */
#define MCR0_XMTEN 0x1000	/* transmit enable */
#define MCR0_FD 0x8000		/* full duplex */
#define MCR1 0x04		/* MAC control register 1 */
#define MAC_RST 0x0001		/* reset the MAC */
#define MBCR 0x08		/* bus control register */
#define MT_ICR 0x0C		/* TX interrupt control register */
#define MR_ICR 0x10		/* RX interrupt control register */
#define MTPR 0x14		/* TX poll command register */
#define TM2TX 0x0001		/* trigger the MAC to transmit */
#define MR_BSR 0x18		/* RX buffer size register */
#define MR_DCR 0x1A		/* RX descriptor control register */
#define MLSR 0x1C		/* last status register */
#define TX_FIFO_UNDR 0x0200	/* TX FIFO underrun */
#define TX_EXCEEDC 0x2000	/* transmit exceeded collision count */
#define TX_LATEC 0x4000		/* transmit late collision */
#define MMDIO 0x20		/* MDIO control register */
#define MDIO_WRITE 0x4000	/* MDIO write command / busy bit */
#define MDIO_READ 0x2000	/* MDIO read command / busy bit */
#define MMRD 0x24		/* MDIO read data register */
#define MMWD 0x28		/* MDIO write data register */
#define MTD_SA0 0x2C		/* TX descriptor ring start, low 16 bits */
#define MTD_SA1 0x30		/* TX descriptor ring start, high 16 bits */
#define MRD_SA0 0x34		/* RX descriptor ring start, low 16 bits */
#define MRD_SA1 0x38		/* RX descriptor ring start, high 16 bits */
#define MISR 0x3C		/* MAC interrupt status register */
#define MIER 0x40		/* MAC interrupt enable register */
#define MSK_INT 0x0000		/* mask off all interrupt sources */
#define RX_FINISH 0x0001	/* RX finished */
#define RX_NO_DESC 0x0002	/* no RX descriptor available */
#define RX_FIFO_FULL 0x0004	/* RX FIFO full */
#define RX_EARLY 0x0008		/* RX early */
#define TX_FINISH 0x0010	/* TX finished */
#define TX_EARLY 0x0080		/* TX early */
#define EVENT_OVRFL 0x0100	/* event counter overflow */
#define LINK_CHANGED 0x0200	/* PHY link status change */
#define ME_CISR 0x44		/* event counter interrupt status */
#define ME_CIER 0x48		/* event counter interrupt enable */
#define MR_CNT 0x50		/* RX event counter */
#define ME_CNT0 0x52		/* event counter 0 (multicast count, per r6040_get_stats) */
#define ME_CNT1 0x54		/* event counter 1 (CRC errors, per r6040_get_stats) */
#define ME_CNT2 0x56		/* event counter 2 */
#define ME_CNT3 0x58		/* event counter 3 */
#define MT_CNT 0x5A		/* TX event counter */
#define ME_CNT4 0x5C		/* event counter 4 */
#define MP_CNT 0x5E		/* pause-frame counter */
#define MAR0 0x60		/* multicast hash table, word 0 */
#define MAR1 0x62		/* multicast hash table, word 1 */
#define MAR2 0x64		/* multicast hash table, word 2 */
#define MAR3 0x66		/* multicast hash table, word 3 */
#define MID_0L 0x68		/* own station address, low word */
#define MID_0M 0x6A		/* own station address, middle word */
#define MID_0H 0x6C		/* own station address, high word */
#define MID_1L 0x70		/* extra match address 1, low word */
#define MID_1M 0x72		/* extra match address 1, middle word */
#define MID_1H 0x74		/* extra match address 1, high word */
#define MID_2L 0x78		/* extra match address 2, low word */
#define MID_2M 0x7A		/* extra match address 2, middle word */
#define MID_2H 0x7C		/* extra match address 2, high word */
#define MID_3L 0x80		/* extra match address 3, low word */
#define MID_3M 0x82		/* extra match address 3, middle word */
#define MID_3H 0x84		/* extra match address 3, high word */
#define PHY_CC 0x88		/* PHY status-change configuration register */
#define SCEN 0x8000		/* PHY status-change enable */
#define PHYAD_SHIFT 8		/* shift for the PHY address field */
#define TMRDIV_SHIFT 0		/* shift for the timer-divider field */
#define PHY_ST 0x8A		/* PHY status register */
#define MAC_SM 0xAC		/* MAC status machine */
#define MAC_SM_RST 0x0002	/* MAC status machine reset */
#define MD_CSC 0xb6		/* MDC speed control register */
#define MD_CSC_DEFAULT 0x0030	/* MD_CSC power-on default */
#define MAC_ID 0xBE		/* MAC identifier register */

/* Descriptor ring geometry */
#define TX_DCNT 0x80		/* TX descriptor count */
#define RX_DCNT 0x80		/* RX descriptor count */
#define MAX_BUF_SIZE 0x600	/* 1536 bytes: maximum frame buffer size */
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A	/* bus control register default value */
#define MCAST_MAX 3		/* chip has only 3 extra exact-match slots */

/* Iteration bound for the MDIO / MAC-reset busy-wait loops */
#define MAC_DEF_TIMEOUT 2048

/* Descriptor status bits */
#define DSC_OWNER_MAC 0x8000	/* MAC owns the descriptor */
#define DSC_RX_OK 0x4000	/* RX was successful */
#define DSC_RX_ERR 0x0800	/* RX error summary */
#define DSC_RX_ERR_DRI 0x0400	/* RX dribble error (counted as frame error) */
#define DSC_RX_ERR_BUF 0x0200	/* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100	/* RX frame too long */
#define DSC_RX_ERR_RUNT 0x0080	/* RX runt frame */
#define DSC_RX_ERR_CRC 0x0040	/* RX CRC error */
#define DSC_RX_BCAST 0x0020	/* RX broadcast frame */
#define DSC_RX_MCAST 0x0010	/* RX multicast frame */
#define DSC_RX_MCH_HIT 0x0008	/* RX multicast hash-table hit */
#define DSC_RX_MIDH_HIT 0x0004	/* RX MID table hit */
#define DSC_RX_IDX_MID_MASK 3	/* mask for the index of the matched MID */

MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
"Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);

/* Interrupt sources the driver services via NAPI */
#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS (TX_FINISH)
#define INT_MASK (RX_INTS | TX_INTS)
0162
/*
 * DMA descriptor shared with the MAC.  The leading fields (status, len,
 * buf, ndesc) are the hardware-visible part: the MAC is given the ring's
 * bus address via MTD_SA0/1 and MRD_SA0/1 and walks it through ndesc.
 * The remaining fields are driver-private bookkeeping.
 */
struct r6040_descriptor {
	u16 status, len;	/* ownership/error bits; buffer length */
	__le32 buf;		/* bus address of the data buffer */
	__le32 ndesc;		/* bus address of the next descriptor */
	u32 rev1;		/* reserved */
	char *vbufp;		/* virtual buffer address (unused in this file) */
	struct r6040_descriptor *vndescp;	/* virtual next-descriptor pointer */
	struct sk_buff *skb_ptr;	/* skb backing this descriptor's buffer */
	u32 rev2;		/* reserved */
} __aligned(32);
0173
/* Per-adapter driver state, stored in netdev_priv(dev). */
struct r6040_private {
	spinlock_t lock;		/* guards rings, stats and mcr0 */
	struct pci_dev *pdev;		/* owning PCI device */
	struct r6040_descriptor *rx_insert_ptr;	/* next RX slot to (re)fill */
	struct r6040_descriptor *rx_remove_ptr;	/* next RX slot to reap */
	struct r6040_descriptor *tx_insert_ptr;	/* next TX slot to queue on */
	struct r6040_descriptor *tx_remove_ptr;	/* next TX slot to reclaim */
	struct r6040_descriptor *rx_ring;	/* RX ring (coherent DMA memory) */
	struct r6040_descriptor *tx_ring;	/* TX ring (coherent DMA memory) */
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */
	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */
	u16 tx_free_desc;		/* number of free TX descriptors */
	u16 mcr0;			/* shadow of MAC control register 0 */
	struct net_device *dev;		/* back-pointer to the netdev */
	struct mii_bus *mii_bus;	/* MDIO bus for the attached PHY */
	struct napi_struct napi;	/* NAPI polling context */
	void __iomem *base;		/* mapped MAC register window */
	int old_link;			/* last link state seen by adjust_link */
	int old_duplex;			/* last duplex seen by adjust_link */
};
0194
0195 static char version[] = DRV_NAME
0196 ": RDC R6040 NAPI net driver,"
0197 "version "DRV_VERSION " (" DRV_RELDATE ")";
0198
0199
0200 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
0201 {
0202 int limit = MAC_DEF_TIMEOUT;
0203 u16 cmd;
0204
0205 iowrite16(MDIO_READ | reg | (phy_addr << 8), ioaddr + MMDIO);
0206
0207 while (limit--) {
0208 cmd = ioread16(ioaddr + MMDIO);
0209 if (!(cmd & MDIO_READ))
0210 break;
0211 udelay(1);
0212 }
0213
0214 if (limit < 0)
0215 return -ETIMEDOUT;
0216
0217 return ioread16(ioaddr + MMRD);
0218 }
0219
0220
0221 static int r6040_phy_write(void __iomem *ioaddr,
0222 int phy_addr, int reg, u16 val)
0223 {
0224 int limit = MAC_DEF_TIMEOUT;
0225 u16 cmd;
0226
0227 iowrite16(val, ioaddr + MMWD);
0228
0229 iowrite16(MDIO_WRITE | reg | (phy_addr << 8), ioaddr + MMDIO);
0230
0231 while (limit--) {
0232 cmd = ioread16(ioaddr + MMDIO);
0233 if (!(cmd & MDIO_WRITE))
0234 break;
0235 udelay(1);
0236 }
0237
0238 return (limit < 0) ? -ETIMEDOUT : 0;
0239 }
0240
0241 static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
0242 {
0243 struct net_device *dev = bus->priv;
0244 struct r6040_private *lp = netdev_priv(dev);
0245 void __iomem *ioaddr = lp->base;
0246
0247 return r6040_phy_read(ioaddr, phy_addr, reg);
0248 }
0249
0250 static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
0251 int reg, u16 value)
0252 {
0253 struct net_device *dev = bus->priv;
0254 struct r6040_private *lp = netdev_priv(dev);
0255 void __iomem *ioaddr = lp->base;
0256
0257 return r6040_phy_write(ioaddr, phy_addr, reg, value);
0258 }
0259
0260 static void r6040_free_txbufs(struct net_device *dev)
0261 {
0262 struct r6040_private *lp = netdev_priv(dev);
0263 int i;
0264
0265 for (i = 0; i < TX_DCNT; i++) {
0266 if (lp->tx_insert_ptr->skb_ptr) {
0267 dma_unmap_single(&lp->pdev->dev,
0268 le32_to_cpu(lp->tx_insert_ptr->buf),
0269 MAX_BUF_SIZE, DMA_TO_DEVICE);
0270 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
0271 lp->tx_insert_ptr->skb_ptr = NULL;
0272 }
0273 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
0274 }
0275 }
0276
0277 static void r6040_free_rxbufs(struct net_device *dev)
0278 {
0279 struct r6040_private *lp = netdev_priv(dev);
0280 int i;
0281
0282 for (i = 0; i < RX_DCNT; i++) {
0283 if (lp->rx_insert_ptr->skb_ptr) {
0284 dma_unmap_single(&lp->pdev->dev,
0285 le32_to_cpu(lp->rx_insert_ptr->buf),
0286 MAX_BUF_SIZE, DMA_FROM_DEVICE);
0287 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
0288 lp->rx_insert_ptr->skb_ptr = NULL;
0289 }
0290 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
0291 }
0292 }
0293
0294 static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
0295 dma_addr_t desc_dma, int size)
0296 {
0297 struct r6040_descriptor *desc = desc_ring;
0298 dma_addr_t mapping = desc_dma;
0299
0300 while (size-- > 0) {
0301 mapping += sizeof(*desc);
0302 desc->ndesc = cpu_to_le32(mapping);
0303 desc->vndescp = desc + 1;
0304 desc++;
0305 }
0306 desc--;
0307 desc->ndesc = cpu_to_le32(desc_dma);
0308 desc->vndescp = desc_ring;
0309 }
0310
0311 static void r6040_init_txbufs(struct net_device *dev)
0312 {
0313 struct r6040_private *lp = netdev_priv(dev);
0314
0315 lp->tx_free_desc = TX_DCNT;
0316
0317 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
0318 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
0319 }
0320
0321 static int r6040_alloc_rxbufs(struct net_device *dev)
0322 {
0323 struct r6040_private *lp = netdev_priv(dev);
0324 struct r6040_descriptor *desc;
0325 struct sk_buff *skb;
0326 int rc;
0327
0328 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
0329 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
0330
0331
0332 desc = lp->rx_ring;
0333 do {
0334 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
0335 if (!skb) {
0336 rc = -ENOMEM;
0337 goto err_exit;
0338 }
0339 desc->skb_ptr = skb;
0340 desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev,
0341 desc->skb_ptr->data,
0342 MAX_BUF_SIZE,
0343 DMA_FROM_DEVICE));
0344 desc->status = DSC_OWNER_MAC;
0345 desc = desc->vndescp;
0346 } while (desc != lp->rx_ring);
0347
0348 return 0;
0349
0350 err_exit:
0351
0352 r6040_free_rxbufs(dev);
0353 return rc;
0354 }
0355
/*
 * Reset the MAC core.  The MDC speed control register (MD_CSC) is
 * preserved across the reset: it is sampled first and rewritten
 * afterwards if it held a non-default value.
 */
static void r6040_reset_mac(struct r6040_private *lp)
{
	void __iomem *ioaddr = lp->base;
	int limit = MAC_DEF_TIMEOUT;
	u16 cmd, md_csc;

	md_csc = ioread16(ioaddr + MD_CSC);
	iowrite16(MAC_RST, ioaddr + MCR1);
	/* Bounded busy-wait: poll until MCR1 reports the MAC_RST bit */
	while (limit--) {
		cmd = ioread16(ioaddr + MCR1);
		if (cmd & MAC_RST)
			break;
	}

	/* Reset the internal state machine as well, then let it settle */
	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
	iowrite16(0, ioaddr + MAC_SM);
	mdelay(5);

	/* Restore the saved MDC clock setting if it was customised */
	if (md_csc != MD_CSC_DEFAULT)
		iowrite16(md_csc, ioaddr + MD_CSC);
}
0379
/* Program the MAC: ring addresses, interrupt enables, RX/TX enable. */
static void r6040_init_mac_regs(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;

	/* Mask off all interrupts while reconfiguring */
	iowrite16(MSK_INT, ioaddr + MIER);

	/* Reset the MAC core */
	r6040_reset_mac(lp);

	/* Bus control register: default configuration */
	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);

	/* Maximum receive buffer size */
	iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);

	/* TX descriptor ring start address, split into two 16-bit halves */
	iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
	iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);

	/* RX descriptor ring start address */
	iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
	iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);

	/* Clear the TX and RX interrupt control registers */
	iowrite16(0, ioaddr + MT_ICR);
	iowrite16(0, ioaddr + MR_ICR);

	/* Enable the interrupt sources the driver services */
	iowrite16(INT_MASK, ioaddr + MIER);

	/* Enable RX (plus whatever mcr0 holds); MCR0 is at register offset 0 */
	iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);

	/* Kick the transmitter in case descriptors were already queued
	 * before this (re)initialisation (e.g. after a TX timeout) */
	iowrite16(TM2TX, ioaddr + MTPR);
}
0420
0421 static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
0422 {
0423 struct r6040_private *priv = netdev_priv(dev);
0424 void __iomem *ioaddr = priv->base;
0425
0426 netdev_warn(dev, "transmit timed out, int enable %4.4x "
0427 "status %4.4x\n",
0428 ioread16(ioaddr + MIER),
0429 ioread16(ioaddr + MISR));
0430
0431 dev->stats.tx_errors++;
0432
0433
0434 r6040_init_mac_regs(dev);
0435 }
0436
0437 static struct net_device_stats *r6040_get_stats(struct net_device *dev)
0438 {
0439 struct r6040_private *priv = netdev_priv(dev);
0440 void __iomem *ioaddr = priv->base;
0441 unsigned long flags;
0442
0443 spin_lock_irqsave(&priv->lock, flags);
0444 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
0445 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
0446 spin_unlock_irqrestore(&priv->lock, flags);
0447
0448 return &dev->stats;
0449 }
0450
0451
0452 static void r6040_down(struct net_device *dev)
0453 {
0454 struct r6040_private *lp = netdev_priv(dev);
0455 void __iomem *ioaddr = lp->base;
0456 const u16 *adrp;
0457
0458
0459 iowrite16(MSK_INT, ioaddr + MIER);
0460
0461
0462 r6040_reset_mac(lp);
0463
0464
0465 adrp = (const u16 *) dev->dev_addr;
0466 iowrite16(adrp[0], ioaddr + MID_0L);
0467 iowrite16(adrp[1], ioaddr + MID_0M);
0468 iowrite16(adrp[2], ioaddr + MID_0H);
0469 }
0470
0471 static int r6040_close(struct net_device *dev)
0472 {
0473 struct r6040_private *lp = netdev_priv(dev);
0474 struct pci_dev *pdev = lp->pdev;
0475
0476 phy_stop(dev->phydev);
0477 napi_disable(&lp->napi);
0478 netif_stop_queue(dev);
0479
0480 spin_lock_irq(&lp->lock);
0481 r6040_down(dev);
0482
0483
0484 r6040_free_rxbufs(dev);
0485
0486
0487 r6040_free_txbufs(dev);
0488
0489 spin_unlock_irq(&lp->lock);
0490
0491 free_irq(dev->irq, dev);
0492
0493
0494 if (lp->rx_ring) {
0495 dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring,
0496 lp->rx_ring_dma);
0497 lp->rx_ring = NULL;
0498 }
0499
0500 if (lp->tx_ring) {
0501 dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring,
0502 lp->tx_ring_dma);
0503 lp->tx_ring = NULL;
0504 }
0505
0506 return 0;
0507 }
0508
/*
 * Receive up to @limit frames from the RX ring (NAPI context).
 * Good frames are handed to the stack and replaced by a freshly
 * allocated skb so the descriptor goes straight back to the MAC; error
 * frames only bump the relevant counters.
 * Returns the number of descriptors processed (including errored ones).
 */
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Walk the ring until we hit a descriptor the MAC still owns */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Whole status word; error bits examined below */
		err = descptr->status;

		if (err & DSC_RX_ERR) {
			/* Classify the hardware error into netdev stats */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			if (err & DSC_RX_ERR_CRC) {
				/* rx_crc_errors is also updated under this
				 * lock from r6040_get_stats */
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Allocate the replacement buffer first: on failure the
		 * frame is dropped but the ring stays fully populated */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* The hardware length includes the FCS; strip it */
		skb_put(skb_ptr, descptr->len - ETH_FCS_LEN);
		dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
				 MAX_BUF_SIZE, DMA_FROM_DEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Hand the frame to the stack (NAPI context) */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - ETH_FCS_LEN;

		/* Attach the replacement skb and map it for the MAC */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev,
							  descptr->skb_ptr->data,
							  MAX_BUF_SIZE,
							  DMA_FROM_DEVICE));

next_descr:
		/* Return the descriptor to the MAC and advance */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}
0581
/*
 * Reclaim completed descriptors from the TX ring (NAPI context).
 * Frees each transmitted skb, accounts TX statistics, and wakes the
 * queue once at least one descriptor is free again.
 */
static void r6040_tx(struct net_device *dev)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = priv->base;
	struct sk_buff *skb_ptr;
	u16 err;

	spin_lock(&priv->lock);
	descptr = priv->tx_remove_ptr;
	while (priv->tx_free_desc < TX_DCNT) {
		/* Transmitter status is per-chip (MLSR), not per-descriptor */
		err = ioread16(ioaddr + MLSR);

		if (err & TX_FIFO_UNDR)
			dev->stats.tx_fifo_errors++;
		if (err & (TX_EXCEEDC | TX_LATEC))
			dev->stats.tx_carrier_errors++;

		/* Stop at the first descriptor the MAC still owns */
		if (descptr->status & DSC_OWNER_MAC)
			break;
		skb_ptr = descptr->skb_ptr;

		/* Account the completed transmission */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_ptr->len;

		dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
				 skb_ptr->len, DMA_TO_DEVICE);

		/* Release the skb and detach it from the descriptor */
		dev_kfree_skb(skb_ptr);
		descptr->skb_ptr = NULL;

		/* Advance to the next descriptor */
		descptr = descptr->vndescp;
		priv->tx_free_desc++;
	}
	priv->tx_remove_ptr = descptr;

	/* Room again: let the stack queue more packets */
	if (priv->tx_free_desc)
		netif_wake_queue(dev);
	spin_unlock(&priv->lock);
}
0624
0625 static int r6040_poll(struct napi_struct *napi, int budget)
0626 {
0627 struct r6040_private *priv =
0628 container_of(napi, struct r6040_private, napi);
0629 struct net_device *dev = priv->dev;
0630 void __iomem *ioaddr = priv->base;
0631 int work_done;
0632
0633 r6040_tx(dev);
0634
0635 work_done = r6040_rx(dev, budget);
0636
0637 if (work_done < budget) {
0638 napi_complete_done(napi, work_done);
0639
0640 iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
0641 ioaddr + MIER);
0642 }
0643 return work_done;
0644 }
0645
0646
/*
 * Interrupt handler (the IRQ line may be shared).
 * Saves and masks the interrupt-enable register, reads the status,
 * defers RX/TX work to NAPI (keeping those sources masked until the
 * poll re-enables them), and accounts RX overflow conditions here.
 */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save the current enable mask, then mask everything off */
	misr = ioread16(ioaddr + MIER);
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read the interrupt status */
	status = ioread16(ioaddr + MISR);

	if (status == 0x0000 || status == 0xffff) {
		/* Not our interrupt (0xffff: device likely gone) —
		 * restore the mask and bail */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX/TX events are processed in r6040_poll() */
	if (status & (RX_INTS | TX_INTS)) {
		if (status & RX_NO_DESC) {
			/* The MAC ran out of RX descriptors: frames lost */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}
		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Keep RX/TX masked until the poll finishes */
			misr &= ~(RX_INTS | TX_INTS);
			__napi_schedule_irqoff(&lp->napi);
		}
	}

	/* Restore the (possibly reduced) interrupt-enable mask */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
0689
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the ISR with the device IRQ temporarily disabled. */
static void r6040_poll_controller(struct net_device *dev)
{
	int irq = dev->irq;

	disable_irq(irq);
	r6040_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
0698
0699
/* Bring the hardware up: rings, PHY fixups, MAC registers, PHY start.
 * Returns 0 or a negative errno from RX buffer allocation. */
static int r6040_up(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	int ret;

	/* Initialise both descriptor rings */
	r6040_init_txbufs(dev);
	ret = r6040_alloc_rxbufs(dev);
	if (ret)
		return ret;

	/* Vendor-specific PHY tweaks: set bit 14 and clear bit 13 of
	 * register 17 on PHY address 30, then write registers 19 and 30 on
	 * PHY address 0.  Magic values; presumably an RDC errata
	 * workaround — not documented here, confirm against the datasheet. */
	r6040_phy_write(ioaddr, 30, 17,
			(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
	r6040_phy_write(ioaddr, 30, 17,
			~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
	r6040_phy_write(ioaddr, 0, 19, 0x0000);
	r6040_phy_write(ioaddr, 0, 30, 0x01F0);

	/* Program the MAC (ring addresses, interrupts, RX enable) */
	r6040_init_mac_regs(dev);

	phy_start(dev->phydev);

	return 0;
}
0727
0728
0729
0730 static void r6040_mac_address(struct net_device *dev)
0731 {
0732 struct r6040_private *lp = netdev_priv(dev);
0733 void __iomem *ioaddr = lp->base;
0734 const u16 *adrp;
0735
0736
0737 r6040_reset_mac(lp);
0738
0739
0740 adrp = (const u16 *) dev->dev_addr;
0741 iowrite16(adrp[0], ioaddr + MID_0L);
0742 iowrite16(adrp[1], ioaddr + MID_0M);
0743 iowrite16(adrp[2], ioaddr + MID_0H);
0744 }
0745
0746 static int r6040_open(struct net_device *dev)
0747 {
0748 struct r6040_private *lp = netdev_priv(dev);
0749 int ret;
0750
0751
0752 ret = request_irq(dev->irq, r6040_interrupt,
0753 IRQF_SHARED, dev->name, dev);
0754 if (ret)
0755 goto out;
0756
0757
0758 r6040_mac_address(dev);
0759
0760
0761 lp->rx_ring =
0762 dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE,
0763 &lp->rx_ring_dma, GFP_KERNEL);
0764 if (!lp->rx_ring) {
0765 ret = -ENOMEM;
0766 goto err_free_irq;
0767 }
0768
0769 lp->tx_ring =
0770 dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE,
0771 &lp->tx_ring_dma, GFP_KERNEL);
0772 if (!lp->tx_ring) {
0773 ret = -ENOMEM;
0774 goto err_free_rx_ring;
0775 }
0776
0777 ret = r6040_up(dev);
0778 if (ret)
0779 goto err_free_tx_ring;
0780
0781 napi_enable(&lp->napi);
0782 netif_start_queue(dev);
0783
0784 return 0;
0785
0786 err_free_tx_ring:
0787 dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring,
0788 lp->tx_ring_dma);
0789 err_free_rx_ring:
0790 dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring,
0791 lp->rx_ring_dma);
0792 err_free_irq:
0793 free_irq(dev->irq, dev);
0794 out:
0795 return ret;
0796 }
0797
/*
 * ndo_start_xmit: queue one skb on the TX ring.
 * Pads short frames to the minimum Ethernet length, claims a free
 * descriptor under the lock, maps the buffer for DMA and hands the
 * descriptor to the MAC.  The TX poll command is skipped while the
 * stack indicates more packets are coming (xmit_more batching).
 */
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	struct r6040_descriptor *descptr;
	void __iomem *ioaddr = lp->base;
	unsigned long flags;

	/* skb_put_padto frees the skb on failure, so just report OK */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* Critical section: the ring is shared with r6040_tx() */
	spin_lock_irqsave(&lp->lock, flags);

	/* No free descriptor: stop the queue and tell the stack to retry */
	if (!lp->tx_free_desc) {
		spin_unlock_irqrestore(&lp->lock, flags);
		netif_stop_queue(dev);
		netdev_err(dev, ": no tx descriptor\n");
		return NETDEV_TX_BUSY;
	}

	/* Fill the descriptor and hand ownership to the MAC */
	lp->tx_free_desc--;
	descptr = lp->tx_insert_ptr;
	descptr->len = skb->len;
	descptr->skb_ptr = skb;
	descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE));
	descptr->status = DSC_OWNER_MAC;

	skb_tx_timestamp(skb);

	/* Trigger the MAC to check the TX descriptor, unless the stack is
	 * still batching more frames (and the queue is not stopped) */
	if (!netdev_xmit_more() || netif_queue_stopped(dev))
		iowrite16(TM2TX, ioaddr + MTPR);
	lp->tx_insert_ptr = descptr->vndescp;

	/* Ring now full: stop the queue until r6040_tx frees a slot */
	if (!lp->tx_free_desc)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
0844
/*
 * ndo_set_rx_mode: program RX filtering.
 * Always rewrites the station address, then selects between promiscuous
 * mode, an all-ones hash (IFF_ALLMULTI), up to MCAST_MAX exact-match MID
 * slots, or a 64-bit CRC hash table for longer multicast lists.
 */
static void r6040_multicast_list(struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;
	const u16 *adrp;
	u16 hash_table[4] = { 0 };

	spin_lock_irqsave(&lp->lock, flags);

	/* Keep our MAC address programmed */
	adrp = (const u16 *)dev->dev_addr;
	iowrite16(adrp[0], ioaddr + MID_0L);
	iowrite16(adrp[1], ioaddr + MID_0M);
	iowrite16(adrp[2], ioaddr + MID_0H);

	/* Start from a known state: hash and promiscuous bits cleared */
	lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);

	/* Promiscuous mode: accept everything */
	if (dev->flags & IFF_PROMISC)
		lp->mcr0 |= MCR0_PROMISC;

	/* All-multicast: enable the hash filter with every bit set */
	else if (dev->flags & IFF_ALLMULTI) {
		lp->mcr0 |= MCR0_HASH_EN;

		/* Clear the exact-match MID slots */
		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* An all-ones hash accepts every multicast frame */
		for (i = 0; i < 4; i++)
			hash_table[i] = 0xffff;
	}

	/* Small list: use the exact-match MID registers */
	else if (netdev_mc_count(dev) <= MCAST_MAX) {
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			u16 *adrp = (u16 *) ha->addr;
			iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
			iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
			iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
			i++;
		}
		/* Zero out any unused slots */
		while (i < MCAST_MAX) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
			i++;
		}
	}
	/* Larger list: fall back to the CRC hash filter */
	else {
		u32 crc;

		lp->mcr0 |= MCR0_HASH_EN;

		for (i = 0; i < MCAST_MAX ; i++) {
			iowrite16(0, ioaddr + MID_1L + 8 * i);
			iowrite16(0, ioaddr + MID_1M + 8 * i);
			iowrite16(0, ioaddr + MID_1H + 8 * i);
		}

		/* Top 6 bits of the Ethernet CRC select one of the 64
		 * hash-table bits */
		netdev_for_each_mc_addr(ha, dev) {
			u8 *addrs = ha->addr;

			crc = ether_crc(ETH_ALEN, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
	}

	iowrite16(lp->mcr0, ioaddr + MCR0);

	/* Write the hash table only when hash filtering is enabled */
	if (lp->mcr0 & MCR0_HASH_EN) {
		iowrite16(hash_table[0], ioaddr + MAR0);
		iowrite16(hash_table[1], ioaddr + MAR1);
		iowrite16(hash_table[2], ioaddr + MAR2);
		iowrite16(hash_table[3], ioaddr + MAR3);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}
0936
0937 static void netdev_get_drvinfo(struct net_device *dev,
0938 struct ethtool_drvinfo *info)
0939 {
0940 struct r6040_private *rp = netdev_priv(dev);
0941
0942 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
0943 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
0944 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
0945 }
0946
/* ethtool operations; link control is delegated to the phylib helpers. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.nway_reset = phy_ethtool_nway_reset,
};
0955
/* net_device callbacks wired into the networking stack. */
static const struct net_device_ops r6040_netdev_ops = {
	.ndo_open = r6040_open,
	.ndo_stop = r6040_close,
	.ndo_start_xmit = r6040_start_xmit,
	.ndo_get_stats = r6040_get_stats,
	.ndo_set_rx_mode = r6040_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_eth_ioctl = phy_do_ioctl,	/* MII ioctls go to the PHY */
	.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = r6040_poll_controller,
#endif
};
0970
0971 static void r6040_adjust_link(struct net_device *dev)
0972 {
0973 struct r6040_private *lp = netdev_priv(dev);
0974 struct phy_device *phydev = dev->phydev;
0975 int status_changed = 0;
0976 void __iomem *ioaddr = lp->base;
0977
0978 BUG_ON(!phydev);
0979
0980 if (lp->old_link != phydev->link) {
0981 status_changed = 1;
0982 lp->old_link = phydev->link;
0983 }
0984
0985
0986 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
0987 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
0988 iowrite16(lp->mcr0, ioaddr);
0989
0990 status_changed = 1;
0991 lp->old_duplex = phydev->duplex;
0992 }
0993
0994 if (status_changed)
0995 phy_print_status(phydev);
0996 }
0997
0998 static int r6040_mii_probe(struct net_device *dev)
0999 {
1000 struct r6040_private *lp = netdev_priv(dev);
1001 struct phy_device *phydev = NULL;
1002
1003 phydev = phy_find_first(lp->mii_bus);
1004 if (!phydev) {
1005 dev_err(&lp->pdev->dev, "no PHY found\n");
1006 return -ENODEV;
1007 }
1008
1009 phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
1010 PHY_INTERFACE_MODE_MII);
1011
1012 if (IS_ERR(phydev)) {
1013 dev_err(&lp->pdev->dev, "could not attach to PHY\n");
1014 return PTR_ERR(phydev);
1015 }
1016
1017 phy_set_max_speed(phydev, SPEED_100);
1018
1019 lp->old_link = 0;
1020 lp->old_duplex = -1;
1021
1022 phy_attached_info(phydev);
1023
1024 return 0;
1025 }
1026
/*
 * PCI probe: map the register window, read the burned-in MAC address,
 * set up NAPI, the MDIO bus and the PHY, and register the netdev.
 * Errors unwind through the goto chain in reverse order of setup.
 */
static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct r6040_private *lp;
	void __iomem *ioaddr;
	int err, io_size = R6040_IO_SIZE;
	static int card_idx = -1;	/* counts probed cards across calls */
	u16 addr[ETH_ALEN / 2];
	int bar = 0;

	pr_info("%s\n", version);

	err = pci_enable_device(pdev);
	if (err)
		goto err_out;

	/* The device can only address 32-bit DMA */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}
	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
		goto err_out_disable_dev;
	}

	/* The BAR must cover the whole register window */
	if (pci_resource_len(pdev, bar) < io_size) {
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		err = -EIO;
		goto err_out_disable_dev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct r6040_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_dev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);

	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_out_free_dev;
	}

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed for device\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* If PHY status-change detection is unconfigured, enable it with
	 * the maximum PHY address and a divider of 7 — presumably a
	 * boot-firmware-didn't-run fallback; confirm against the datasheet */
	if (ioread16(ioaddr + PHY_CC) == 0)
		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
			  7 << TMRDIV_SHIFT, ioaddr + PHY_CC);

	lp->base = ioaddr;
	dev->irq = pdev->irq;

	spin_lock_init(&lp->lock);
	pci_set_drvdata(pdev, dev);

	card_idx++;

	/* Read the station address the boot firmware left in the chip */
	addr[0] = ioread16(ioaddr + MID_0L);
	addr[1] = ioread16(ioaddr + MID_0M);
	addr[2] = ioread16(ioaddr + MID_0H);
	eth_hw_addr_set(dev, (u8 *)addr);

	/* Fall back to a random address if the registers read all-zero */
	if (!(addr[0] || addr[1] || addr[2])) {
		netdev_warn(dev, "MAC address not initialized, "
					"generating random\n");
		eth_hw_addr_random(dev);
	}

	lp->pdev = pdev;
	lp->dev = dev;

	/* Initial MAC control: transmit and receive enabled */
	lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;

	dev->netdev_ops = &r6040_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &lp->napi, r6040_poll, 64);

	/* Register an MDIO bus backed by this device's MMDIO registers */
	lp->mii_bus = mdiobus_alloc();
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out_unmap;
	}

	lp->mii_bus->priv = dev;
	lp->mii_bus->read = r6040_mdiobus_read;
	lp->mii_bus->write = r6040_mdiobus_write;
	lp->mii_bus->name = "r6040_eth_mii";
	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 dev_name(&pdev->dev), card_idx);

	err = mdiobus_register(lp->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_out_mdio;
	}

	err = r6040_mii_probe(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_out_mdio_unregister;
	}

	/* Finally make the interface visible to the stack */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register net device\n");
		goto err_out_mdio_unregister;
	}
	return 0;

err_out_mdio_unregister:
	mdiobus_unregister(lp->mii_bus);
err_out_mdio:
	mdiobus_free(lp->mii_bus);
err_out_unmap:
	netif_napi_del(&lp->napi);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_dev:
	free_netdev(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out:
	return err;
}
1182
1183 static void r6040_remove_one(struct pci_dev *pdev)
1184 {
1185 struct net_device *dev = pci_get_drvdata(pdev);
1186 struct r6040_private *lp = netdev_priv(dev);
1187
1188 unregister_netdev(dev);
1189 mdiobus_unregister(lp->mii_bus);
1190 mdiobus_free(lp->mii_bus);
1191 netif_napi_del(&lp->napi);
1192 pci_iounmap(pdev, lp->base);
1193 pci_release_regions(pdev);
1194 free_netdev(dev);
1195 pci_disable_device(pdev);
1196 }
1197
1198
/* PCI IDs this driver binds to: RDC vendor, device 0x6040. */
static const struct pci_device_id r6040_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
1204
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver r6040_driver = {
	.name = DRV_NAME,
	.id_table = r6040_pci_tbl,
	.probe = r6040_init_one,
	.remove = r6040_remove_one,
};

module_pci_driver(r6040_driver);