/*
 * via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips.
 *
 * Written 1998-2001 by Donald Becker; since then maintained in the
 * mainline kernel tree. Supports the VT86C100A (Rhine-I), VT6102
 * (Rhine-II), VT6105/VT6105M (Rhine-III) and the integrated Rhine
 * cores of the VT8231/8233/8235/8237 south bridges.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug;
/* Default netif_msg flags; 0 keeps the driver quiet by default. */
#define RHINE_MSG_DEFAULT \
	(0x0000)
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so avoid putting the chip into D3 at all. */
static bool avoid_D3;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority; there are no ill effects from too-large
 * receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT8251		= 0x7C,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs MMIO enabled */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105 */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices.
 * The .data field is currently only used to store quirks.
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace	= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd and ChipCmd1 */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64	packets;
	u64	bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

/* Read-modify-write helpers for 8/16/32-bit device registers. These are
   not atomic; callers must provide their own locking where it matters. */
#define BYTE_REG_BITS_ON(x, p)		do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)		do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)		do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)	(ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)	(ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)	(ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)		do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)		do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)	do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)	do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)	do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)	do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)

static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}
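
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */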
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* Rhine-I enables MMIO via ConfigA bit 5 */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II).
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_eth_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	u8 addr[ETH_ALEN];
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		addr[i] = ioread8(ioaddr + StationAddr + i);
	eth_hw_addr_set(dev, addr);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev() call */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;

/* PIO is used by default; MMIO can be selected at build time via
 * CONFIG_VIA_RHINE_MMIO, in which case it must be explicitly enabled
 * on the chip (rqNeedEnMMIO).
 */
#ifdef CONFIG_VIA_RHINE_MMIO
	u32 quirks = rqNeedEnMMIO;
#else
	u32 quirks = 0;
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	if (pdev->revision < VTunknown0) {
		quirks |= rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks |= rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

	enable_mmio(pioaddr, quirks);

	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
	if (rc)
		goto err_out_unmap;

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static int rhine_init_one_platform(struct platform_device *pdev)
{
	const u32 *quirks;
	int irq;
	void __iomem *ioaddr;

	quirks = of_device_get_match_data(&pdev->dev);
	if (!quirks)
		return -EINVAL;

	ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	return rhine_init_one_common(&pdev->dev, *quirks,
				     (long)ioaddr, ioaddr, irq);
}

static int alloc_ring(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;

	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}

struct rhine_skb_dma {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static inline int rhine_skb_dma_init(struct net_device *dev,
				     struct rhine_skb_dma *sd)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	const int size = rp->rx_buf_sz;

	sd->skb = netdev_alloc_skb(dev, size);
	if (!sd->skb)
		return -ENOMEM;

	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
		dev_kfree_skb_any(sd->skb);
		return -EIO;
	}

	return 0;
}

static void rhine_reset_rbufs(struct rhine_private *rp)
{
	int i;

	rp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++)
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}

static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
					   struct rhine_skb_dma *sd, int entry)
{
	rp->rx_skbuff_dma[entry] = sd->dma;
	rp->rx_skbuff[entry] = sd->skb;

	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
	dma_wmb();
}

static void free_rbufs(struct net_device *dev);

static int alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int rc, i;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct rhine_skb_dma sd;

		rc = rhine_skb_dma_init(dev, &sd);
		if (rc < 0) {
			free_rbufs(dev);
			goto out;
		}

		rhine_skb_dma_nic_store(rp, &sd, i);
	}

	rhine_reset_rbufs(rp);
out:
	return rc;
}

static void free_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			dma_unmap_single(hwdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

	netdev_reset_queue(dev);
}

static void free_tbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!rp->mii_if.force_media)
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}

	rhine_check_media(dev, 0);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}

static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;	/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	if (rp->quirks & rqMgmt)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	} else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
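
/* Read and write over the MII Management Data I/O (MDIO) interface. */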
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}

static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}

static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto out;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc < 0)
		goto out_free_irq;

	rc = alloc_rbufs(dev);
	if (rc < 0)
		goto out_free_ring;

	alloc_tbufs(dev);
	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

out:
	return rc;

out_free_ring:
	free_ring(dev);
out_free_irq:
	free_irq(rp->irq, dev);
	goto out;
}

static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	alloc_tbufs(dev);

	rhine_reset_rbufs(rp);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	netif_trans_update(dev);	/* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

static inline bool rhine_tx_queue_full(struct rhine_private *rp)
{
	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
}
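
/*
 * Queue a packet for transmission. Ring slots are claimed via cur_tx and
 * reclaimed by rhine_tx(); the queue is considered full once
 * cur_tx - dirty_tx reaches TX_QUEUE_LEN (see rhine_tx_queue_full()).
 */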
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else
		rp->tx_ring[entry].tx_status = 0;

	netdev_sent_queue(dev, skb->len);
	/* lock eth irq */
	dma_wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;
	/*
	 * Nobody wants cur_tx write to rot for ages after the NIC will have
	 * seen the transmit request, especially as the transmit completion
	 * handler could miss it.
	 */
	smp_wmb();

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
	if (rhine_tx_queue_full(rp)) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (!rhine_tx_queue_full(rp))
			netif_wake_queue(dev);
	}

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}

static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
}

/* The interrupt handler acknowledges fast events and defers the real
   work to NAPI; slow events go to the slow_event_task workqueue. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}
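
/* Scavenge completed Tx descriptors; called from NAPI poll context. */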
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int dirty_tx = rp->dirty_tx;
	unsigned int cur_tx;
	struct sk_buff *skb;

	/*
	 * The race with rhine_start_tx does not matter here as long as the
	 * driver enforces a value of cur_tx that was relevant when the
	 * packet was scheduled to the network chipset.
	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
	 */
	smp_rmb();
	cur_tx = rp->cur_tx;
	/* find and cleanup dirty tx descriptors */
	while (dirty_tx != cur_tx) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}

		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		dirty_tx++;
	}

	rp->dirty_tx = dirty_tx;
	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
	smp_wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (rhine_tx_queue_full(rp))
			netif_stop_queue(dev);
	}
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}

static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
				     int data_size)
{
	dma_rmb();
	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
		u16 vlan_tci;

		vlan_tci = rhine_get_vlan_tci(skb, data_size);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}
}
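
/* Process up to limit frames from receive ring */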
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int entry = rp->cur_rx % RX_RING_SIZE;
	int count;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_ring + entry;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			/* Length should omit the CRC (4 bytes) */
			int pkt_len = data_size - 4;
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak) {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
				if (unlikely(!skb))
					goto drop;

				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				struct rhine_skb_dma sd;

				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
					goto drop;

				skb = rp->rx_skbuff[entry];

				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				rhine_skb_dma_nic_store(rp, &sd, entry);
			}

			skb_put(skb, pkt_len);

			rhine_rx_vlan_tag(skb, desc, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
give_descriptor_to_nic:
		desc->rx_status = cpu_to_le32(DescOwn);
		entry = (++rp->cur_rx) % RX_RING_SIZE;
	}

	return count;

drop:
	dev->stats.rx_dropped++;
	goto give_descriptor_to_nic;
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {
		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errors(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
2287
2288 static int netdev_get_link_ksettings(struct net_device *dev,
2289 struct ethtool_link_ksettings *cmd)
2290 {
2291 struct rhine_private *rp = netdev_priv(dev);
2292
2293 mutex_lock(&rp->task_lock);
2294 mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2295 mutex_unlock(&rp->task_lock);
2296
2297 return 0;
2298 }
2299
2300 static int netdev_set_link_ksettings(struct net_device *dev,
2301 const struct ethtool_link_ksettings *cmd)
2302 {
2303 struct rhine_private *rp = netdev_priv(dev);
2304 int rc;
2305
2306 mutex_lock(&rp->task_lock);
2307 rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2308 rhine_set_carrier(&rp->mii_if);
2309 mutex_unlock(&rp->task_lock);
2310
2311 return rc;
2312 }
2313
2314 static int netdev_nway_reset(struct net_device *dev)
2315 {
2316 struct rhine_private *rp = netdev_priv(dev);
2317
2318 return mii_nway_restart(&rp->mii_if);
2319 }
2320
2321 static u32 netdev_get_link(struct net_device *dev)
2322 {
2323 struct rhine_private *rp = netdev_priv(dev);
2324
2325 return mii_link_ok(&rp->mii_if);
2326 }
2327
2328 static u32 netdev_get_msglevel(struct net_device *dev)
2329 {
2330 struct rhine_private *rp = netdev_priv(dev);
2331
2332 return rp->msg_enable;
2333 }
2334
2335 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2336 {
2337 struct rhine_private *rp = netdev_priv(dev);
2338
2339 rp->msg_enable = value;
2340 }
2341
2342 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2343 {
2344 struct rhine_private *rp = netdev_priv(dev);
2345
2346 if (!(rp->quirks & rqWOL))
2347 return;
2348
2349 spin_lock_irq(&rp->lock);
2350 wol->supported = WAKE_PHY | WAKE_MAGIC |
2351 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2352 wol->wolopts = rp->wolopts;
2353 spin_unlock_irq(&rp->lock);
2354 }
2355
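/*
 * Only latch the requested WOL options here; the hardware is actually
 * armed in rhine_shutdown_pci() on the way down.
 */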
2356 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2357 {
2358 struct rhine_private *rp = netdev_priv(dev);
2359 u32 support = WAKE_PHY | WAKE_MAGIC |
2360 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
2361
2362 if (!(rp->quirks & rqWOL))
2363 return -EINVAL;
2364
2365 if (wol->wolopts & ~support)
2366 return -EINVAL;
2367
2368 spin_lock_irq(&rp->lock);
2369 rp->wolopts = wol->wolopts;
2370 spin_unlock_irq(&rp->lock);
2371
2372 return 0;
2373 }
2374
2375 static const struct ethtool_ops netdev_ethtool_ops = {
2376 .get_drvinfo = netdev_get_drvinfo,
2377 .nway_reset = netdev_nway_reset,
2378 .get_link = netdev_get_link,
2379 .get_msglevel = netdev_get_msglevel,
2380 .set_msglevel = netdev_set_msglevel,
2381 .get_wol = rhine_get_wol,
2382 .set_wol = rhine_set_wol,
2383 .get_link_ksettings = netdev_get_link_ksettings,
2384 .set_link_ksettings = netdev_set_link_ksettings,
2385 };
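/*
 * A rough userspace mapping of these ops (hypothetical interface eth0):
 *
 *   ethtool eth0                           -> netdev_get_link_ksettings(),
 *                                             netdev_get_link()
 *   ethtool -s eth0 speed 100 duplex full  -> netdev_set_link_ksettings()
 *   ethtool -r eth0                        -> netdev_nway_reset()
 *   ethtool -s eth0 msglvl 0x1             -> netdev_set_msglevel()
 */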
2386
2387 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2388 {
2389 struct rhine_private *rp = netdev_priv(dev);
2390 int rc;
2391
2392 if (!netif_running(dev))
2393 return -EINVAL;
2394
2395 mutex_lock(&rp->task_lock);
2396 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2397 rhine_set_carrier(&rp->mii_if);
2398 mutex_unlock(&rp->task_lock);
2399
2400 return rc;
2401 }
2402
2403 static int rhine_close(struct net_device *dev)
2404 {
2405 struct rhine_private *rp = netdev_priv(dev);
2406 void __iomem *ioaddr = rp->base;
2407
2408 rhine_task_disable(rp);
2409 napi_disable(&rp->napi);
2410 netif_stop_queue(dev);
2411
2412 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2413 ioread16(ioaddr + ChipCmd));
2414
2415 /* Switch to loopback mode to avoid hardware races. */
2416 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2417
2418 rhine_irq_disable(rp);
2419
2420 /* Stop the chip's Tx and Rx processes. */
2421 iowrite16(CmdStop, ioaddr + ChipCmd);
2422
2423 free_irq(rp->irq, dev);
2424 free_rbufs(dev);
2425 free_tbufs(dev);
2426 free_ring(dev);
2427
2428 return 0;
2429 }
2430
2432 static void rhine_remove_one_pci(struct pci_dev *pdev)
2433 {
2434 struct net_device *dev = pci_get_drvdata(pdev);
2435 struct rhine_private *rp = netdev_priv(dev);
2436
2437 unregister_netdev(dev);
2438
2439 pci_iounmap(pdev, rp->base);
2440 pci_release_regions(pdev);
2441
2442 free_netdev(dev);
2443 pci_disable_device(pdev);
2444 }
2445
2446 static int rhine_remove_one_platform(struct platform_device *pdev)
2447 {
2448 struct net_device *dev = platform_get_drvdata(pdev);
2449 struct rhine_private *rp = netdev_priv(dev);
2450
2451 unregister_netdev(dev);
2452
2453 iounmap(rp->base);
2454
2455 free_netdev(dev);
2456
2457 return 0;
2458 }
2459
2460 static void rhine_shutdown_pci(struct pci_dev *pdev)
2461 {
2462 struct net_device *dev = pci_get_drvdata(pdev);
2463 struct rhine_private *rp = netdev_priv(dev);
2464 void __iomem *ioaddr = rp->base;
2465
2466 if (!(rp->quirks & rqWOL))
2467 return; /* Nothing to do for non-WOL adapters */
2468
2469 rhine_power_init(dev);
2470
2471 /* Make sure we use pattern 0, 1 and not 4, 5 */
2472 if (rp->quirks & rq6patterns)
2473 iowrite8(0x04, ioaddr + WOLcgClr);
2474
2475 spin_lock(&rp->lock);
2476
2477 if (rp->wolopts & WAKE_MAGIC) {
2478 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2479 /*
2480  * Turn EEPROM-controlled wake-up back on -- some hardware may
2481  * not cooperate otherwise.
2482  */
2483 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2484 }
2485
2486 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2487 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2488
2489 if (rp->wolopts & WAKE_PHY)
2490 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2491
2492 if (rp->wolopts & WAKE_UCAST)
2493 iowrite8(WOLucast, ioaddr + WOLcrSet);
2494
2495 if (rp->wolopts) {
2496 /* Enable legacy WOL (for old motherboards) */
2497 iowrite8(0x01, ioaddr + PwcfgSet);
2498 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2499 }
2500
2501 spin_unlock(&rp->lock);
2502
2503 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2504 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2505
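/* Hit power state D3 (sleep) */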
2506 pci_wake_from_d3(pdev, true);
2507 pci_set_power_state(pdev, PCI_D3hot);
2508 }
2509 }
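/*
 * Example (hypothetical interface name): arm magic-packet wake-up from
 * userspace so the code above sets WOLmagic at shutdown:
 *
 *   ethtool -s eth0 wol g
 *   ethtool eth0      # "Wake-on: g" confirms rp->wolopts took effect
 */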
2510
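/*
 * System sleep hooks. The same dev_pm_ops serves both the PCI and the
 * platform binding; rhine_suspend() additionally runs the PCI WOL
 * shutdown path when the device sits on PCI.
 */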
2511 #ifdef CONFIG_PM_SLEEP
2512 static int rhine_suspend(struct device *device)
2513 {
2514 struct net_device *dev = dev_get_drvdata(device);
2515 struct rhine_private *rp = netdev_priv(dev);
2516
2517 if (!netif_running(dev))
2518 return 0;
2519
2520 rhine_task_disable(rp);
2521 rhine_irq_disable(rp);
2522 napi_disable(&rp->napi);
2523
2524 netif_device_detach(dev);
2525
2526 if (dev_is_pci(device))
2527 rhine_shutdown_pci(to_pci_dev(device));
2528
2529 return 0;
2530 }
2531
2532 static int rhine_resume(struct device *device)
2533 {
2534 struct net_device *dev = dev_get_drvdata(device);
2535 struct rhine_private *rp = netdev_priv(dev);
2536
2537 if (!netif_running(dev))
2538 return 0;
2539
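/*
 * The chip may have lost state while powered down: re-enable MMIO
 * access, redo power-up init, rebuild the Tx ring, reset the Rx ring
 * in place and reprogram the registers before reattaching.
 */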
2540 enable_mmio(rp->pioaddr, rp->quirks);
2541 rhine_power_init(dev);
2542 free_tbufs(dev);
2543 alloc_tbufs(dev);
2544 rhine_reset_rbufs(rp);
2545 rhine_task_enable(rp);
2546 spin_lock_bh(&rp->lock);
2547 init_registers(dev);
2548 spin_unlock_bh(&rp->lock);
2549
2550 netif_device_attach(dev);
2551
2552 return 0;
2553 }
2554
2555 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2556 #define RHINE_PM_OPS (&rhine_pm_ops)
2557
2558 #else
2559
2560 #define RHINE_PM_OPS NULL
2561
2562 #endif
2563
2564 static struct pci_driver rhine_driver_pci = {
2565 .name = DRV_NAME,
2566 .id_table = rhine_pci_tbl,
2567 .probe = rhine_init_one_pci,
2568 .remove = rhine_remove_one_pci,
2569 .shutdown = rhine_shutdown_pci,
2570 .driver.pm = RHINE_PM_OPS,
2571 };
2572
2573 static struct platform_driver rhine_driver_platform = {
2574 .probe = rhine_init_one_platform,
2575 .remove = rhine_remove_one_platform,
2576 .driver = {
2577 .name = DRV_NAME,
2578 .of_match_table = rhine_of_tbl,
2579 .pm = RHINE_PM_OPS,
2580 }
2581 };
2582
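/*
 * Boards whose BIOS is known to mishandle the chip in D3; matched in
 * rhine_init() below to force avoid_D3.
 */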
2583 static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2584 {
2585 .ident = "EPIA-M",
2586 .matches = {
2587 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2588 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2589 },
2590 },
2591 {
2592 .ident = "KV7",
2593 .matches = {
2594 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2595 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2596 },
2597 },
2598 { NULL }
2599 };
2600
2601 static int __init rhine_init(void)
2602 {
2603 int ret_pci, ret_platform;
2604
2606 if (dmi_check_system(rhine_dmi_table)) {
2607 /* these BIOSes fail at PXE boot if chip is in D3 */
2608 avoid_D3 = true;
2609 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2610 } else if (avoid_D3)
2612 pr_info("avoid_D3 set\n");
2613
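/*
 * Register both bus bindings; the module loads if at least one
 * registration succeeds and fails only when both fail.
 */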
2614 ret_pci = pci_register_driver(&rhine_driver_pci);
2615 ret_platform = platform_driver_register(&rhine_driver_platform);
2616 if ((ret_pci < 0) && (ret_platform < 0))
2617 return ret_pci;
2618
2619 return 0;
2620 }
2621
2623 static void __exit rhine_cleanup(void)
2624 {
2625 platform_driver_unregister(&rhine_driver_platform);
2626 pci_unregister_driver(&rhine_driver_pci);
2627 }
2628
2630 module_init(rhine_init);
2631 module_exit(rhine_cleanup);