/*
 * winbond-840.c: A Linux PCI Ethernet driver for the Winbond W89c840 chip.
 *
 * Written by Donald Becker <becker@scyld.com>.  Released under the
 * GNU General Public License (GPL); see the MODULE_* tags below.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"

/*
 * User-tunable parameters.  These can also be set as module options;
 * see the module_param()/MODULE_PARM_DESC() declarations further down.
 */

/* Debug message level (0-6); also exported via ethtool msglevel. */
static int debug = 1;
/* Maximum events (Rx packets, etc.) to handle per interrupt. */
static int max_interrupt_work = 20;

/*
 * Maximum number of multicast addresses to filter exactly (vs. falling
 * back to accept-all-multicast); the chip uses a 64-bit hash filter.
 */
static int multicast_filter_limit = 32;

/*
 * Copy breakpoint for the copy-only-tiny-frames scheme: received frames
 * shorter than this are copied into a freshly allocated skb instead of
 * handing over the ring buffer.
 */
static int rx_copybreak;

/* Per-board option and forced-full-duplex settings (see MODULE_PARM_DESC). */
#define MAX_UNITS 8
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

#define TX_QUEUE_LEN		10	/* Limit Tx ring entries actually used. */
#define TX_QUEUE_LEN_RESTART	5	/* Restart the queue once it drains to this. */

#define TX_BUFLIMIT	(1024-128)	/* Frames longer than this are split across two buffers. */

/*
 * Presumed Tx FIFO size, used to work around the Tx FIFO overflow bug on
 * boards flagged HasBrokenTx: never queue more than this many bytes.
 */
#define TX_FIFO_SIZE		(2048)
#define TX_BUG_FIFO_LIMIT	(TX_FIFO_SIZE-1514-16)

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ		/* tulip.h also defines this */
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
 * Driver operation in brief:
 *
 * The chip uses Tulip-style descriptor rings.  The Rx and Tx rings live in
 * a single coherent DMA allocation (see alloc_ringdesc()); each descriptor
 * points at a separately mapped sk_buff.  A descriptor is handed to the
 * hardware by setting DescOwned in its status word.  Small received frames
 * (< rx_copybreak) are copied into a freshly allocated skb so the ring
 * buffer can be reused.  The MII management interface and the serial EEPROM
 * are bit-banged through the shared EECtrl/MIICtrl register.
 */

enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size = 128,
};

struct pci_id_info {
	const char *name;
	int drv_flags;
};

static const struct pci_id_info pci_id_tbl[] = {
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII },
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx },
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx },
	{ }
};

/* Offsets to the on-chip operational (CSR) registers. */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig (csr6) register that select the Rx mode. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

/* Bits used to bit-bang the MII management interface through MIICtrl. */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip-style Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT 1	/* The W89c840 supports a single MII interface. */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* The saved addresses of sent-in-place packets, for later freeing. */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices. */
	unsigned int rx_buf_sz;		/* Based on MTU + slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* Number of MII devices found. */
	unsigned char phys[MII_CNT];	/* MII device address (only the first entry is used). */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int update_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	__le16 addr[ETH_ALEN / 2];
	void __iomem *ioaddr;

	i = pcim_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_netdev;

	/* The station address is stored in the first three EEPROM words. */
	for (i = 0; i < 3; i++)
		addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
	eth_hw_addr_set(dev, (u8 *)addr);

	/* Reset the chip to erase any previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  Both are
   serial bit streams generated by the host processor. */

/* Delay between EEPROM clock transitions; the dummy register read also
   flushes the preceding posted write on PCI. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading start bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

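/*
 * eeprom_read() clocks a read command out to the serial EEPROM one bit at
 * a time, MSB first (start bit, read opcode, 6-bit word address), then
 * clocks 16 data bits back in through EE_DataIn.  Judging by the 6-bit
 * address field this is the usual 93C46-style part (64 x 16-bit words),
 * of which only the first three words (the station address) are used here.
 */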
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in. */
	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/* Delay between MDIO clock transitions; the dummy register read also
   flushes the preceding posted write. */
#define mdio_delay(mdio_addr)	ioread32(mdio_addr)

/* Set if the MII transceiver requires a 32-bit preamble before each
   management transaction. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

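/*
 * MII management transactions are bit-banged through the MIICtrl register:
 * mdio_sync() sends a preamble of 32 one bits, then mdio_read()/mdio_write()
 * shift out the command frame (start bits, opcode, PHY address, register
 * address) and either clock 16 data bits in through MDIO_DataIn or drive
 * 16 data bits out.  Reads collect a few extra turnaround/idle bits, which
 * the final ">> 1" and "& 0xffff" in mdio_read() strip off.
 */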
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the turnaround and 16 data bits; the extras are discarded below. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command and data bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clock out a couple of idle bits to finish the frame. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset the chip. */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "%s() irq %d\n", __func__, irq);

	i = alloc_ringdesc(dev);
	if (i)
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done %s()\n", __func__);

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 1*HZ;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

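/*
 * update_link() recomputes the speed/duplex bits of csr6 from the MII.
 * BMSR is read twice because the link-status bit is latched on a link
 * failure; carrier state is propagated with netif_carrier_on/off().  For
 * the Davicom DM9101 the operating mode is read back from BMCR, since that
 * PHY apparently does not reflect parallel-detection results in the LPA
 * register; other PHYs use the usual LPA & advertising intersection.  The
 * result is fed to update_csr6() by the callers.
 */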
static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* Reread: the link-status bit is latched, only the second read
	   reflects the current state. */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/*
		 * The Davicom DM9101 apparently does not report the result
		 * of parallel detection in the LPA register, so read the
		 * operating speed/duplex back from BMCR instead.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* Clear the current 100MBit and full-duplex bits, then set them afresh. */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

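/*
 * update_csr6() is the only place that rewrites the NetworkConfig (csr6)
 * register: it clears the Rx/Tx start bits, polls IntrStatus until both
 * the Rx and Tx state machines report idle (for at most RXTX_TIMEOUT
 * microseconds), then writes the new value.  Callers hold np->lock, and a
 * detached device always gets csr6 = 0.
 */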
#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new == np->csr6)
		return;
	/* Stop both Rx and Tx processes. */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* Wait until they have really stopped. */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* Rx idle. */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;	/* Tx idle as well. */
		}

		limit--;
		if (!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* ...and restart them with the new configuration. */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = pci_get_drvdata(np->pci_dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

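/*
 * Ring layout: alloc_ringdesc() obtains one coherent buffer holding
 * RX_RING_SIZE Rx descriptors immediately followed by TX_RING_SIZE Tx
 * descriptors; init_rxtx_rings() points tx_ring just past the Rx ring and
 * programs RxRingPtr/TxRingPtr accordingly.  The last Rx descriptor gets
 * DescEndRing so the chip wraps around (the Tx equivalent is set per-slot
 * in start_tx()), and a descriptor is handed to the chip by writing
 * DescOwned into its status word.
 */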
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc *)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Allocation failures are tolerated. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
						np->rx_buf_sz,
						DMA_FROM_DEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

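/*
 * init_registers() programs the station address, assembles PCIBusCfg (csr0)
 * burst/alignment bits appropriate for the host architecture, starts the
 * transmitter and receiver via update_csr6(), and enables the interrupt
 * sources handled by intr_handler().  It is called with np->lock held and
 * expects the descriptor rings to be initialized already.
 */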
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	/* Write the station address into the chip. */
	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Assemble the PCIBusCfg (csr0) value. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors. */
#else
	i = 0;
#endif
	i |= (0x04<<2);
	i |= 0x02;

	/* Configure the PCI bus bursts and FIFO thresholds. */
#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold; Transmit on; Receive on; plus the current
	   link and Rx-mode bits. */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Acknowledge any pending interrupt sources, then enable the ones
	   this driver handles. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal Tx descriptor pointer
	 * can come out of sync, so perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev);
	np->stats.tx_errors++;
}

static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
					 sizeof(struct w840_rx_desc) * RX_RING_SIZE +
					 sizeof(struct w840_tx_desc) * TX_RING_SIZE,
					 &np->ring_dma_addr, GFP_KERNEL);
	if (!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	dma_free_coherent(&np->pci_dev->dev,
			  sizeof(struct w840_rx_desc) * RX_RING_SIZE +
			  sizeof(struct w840_tx_desc) * TX_RING_SIZE,
			  np->rx_ring, np->ring_dma_addr);
}

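/*
 * Tx descriptor length encoding used by start_tx(): buffer 1 length lives
 * in the low 11 bits of the length word and buffer 2 length is shifted
 * left by 11, so a frame longer than TX_BUFLIMIT is split across both
 * buffers of the same descriptor:
 *
 *	length = DescWholePkt | ((skb->len - TX_BUFLIMIT) << 11) | TX_BUFLIMIT;
 *
 * The queue is stopped when more than TX_QUEUE_LEN descriptors are in
 * flight, or (on HasBrokenTx chips) when the queued bytes would overflow
 * the Tx FIFO (TX_BUG_FIFO_LIMIT).
 */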
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if (entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/*
	 * np->cur_tx must be advanced and DescOwned set while holding the
	 * lock, otherwise netdev_tx_done() could either see a descriptor
	 * the chip still owns or miss one the chip has already completed.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 before handing over ownership */
	np->tx_ring[entry].status = DescOwned;
	wmb();
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Stop the queue when the ring is full, or (HasBrokenTx chips) when
	   the queued bytes could overflow the Tx FIFO. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

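/*
 * netdev_tx_done() walks the Tx ring from dirty_tx forward, stopping at
 * the first descriptor still owned by the chip (status < 0, i.e. DescOwned
 * set).  Completed buffers are unmapped and freed, error and collision
 * counts are folded into np->stats, and the queue is woken once it has
 * drained below TX_QUEUE_LEN_RESTART entries and TX_BUG_FIFO_LIMIT bytes.
 * It is called from the interrupt handler with np->lock held.
 */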
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

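/*
 * The interrupt handler does all of the Rx work and cleans up after the
 * Tx thread.  To bound the time spent in hard interrupt context it gives
 * up after max_interrupt_work events: it then masks everything except
 * AbnormalIntr and TimerInt and arms GPTimer, so netdev_error() can
 * restore the full interrupt mask a moment later.
 */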
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary / uncommon events handler. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Mask everything except AbnormalIntr and TimerInt and
			   let the GP timer re-trigger us shortly. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

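/*
 * netdev_rx() consumes descriptors the chip has handed back.  Frames
 * shorter than rx_copybreak are copied into a freshly allocated skb
 * (leaving the original ring buffer mapped for reuse); larger frames are
 * passed up in place and their ring slot is refilled afterwards by the
 * loop at the bottom of the function.
 */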
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* Process packets until we reach a descriptor still owned by the chip. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, " netdev_rx() status was %08x\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++;
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is short enough to be worth
			   copying into a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->rx_addr[entry],
							   np->rx_skbuff[entry]->len,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version
			if (debug > 5)
				netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;
			np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
							    skb->data,
							    np->rx_buf_sz,
							    DMA_FROM_DEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold. */
#if 0
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127;	/* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable the interrupt sources masked off by the
		   "too much work" path in intr_handler(). */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

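/*
 * The hardware multicast filter is a 64-bit hash split across the
 * MulticastFilter0/1 registers.  __set_rx_mode() derives the bit index
 * from the top six bits of the Ethernet CRC of each address:
 *
 *	filbit = (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;
 *	mc_filter[filbit >> 5] |= 1 << (filbit & 31);
 *
 * Promiscuous mode, IFF_ALLMULTI, or more than multicast_filter_limit
 * addresses fall back to an all-ones filter.  The returned rx_mode bits
 * are OR'ed into csr6 by set_rx_mode()/init_registers().
 */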
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];		/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes and mask its interrupts. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}
}

/*
 * Suspend/resume handling:
 *
 * w840_suspend() stops the media timer, detaches the device and clears
 * csr6 and IntrEnable under np->lock, waits for any running interrupt
 * handler with synchronize_irq(), disables the Tx queue and finally frees
 * the Rx/Tx buffers.  get_stats(), update_csr6() and the interrupt handler
 * all check netif_device_present() before touching the hardware, so a
 * detached device is never accessed.  w840_resume() resets the chip,
 * reattaches the device and rebuilds the rings and registers under
 * np->lock before restarting the queue and the media timer.
 */

static int __maybe_unused w840_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running(dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int __maybe_unused w840_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr + PCIBusCfg);
		ioread32(np->base_addr + PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return 0;
}

static SIMPLE_DEV_PM_OPS(w840_pm_ops, w840_suspend, w840_resume);

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
	.driver.pm	= &w840_pm_ops,
};

module_pci_driver(w840_driver);