0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030 #include <linux/module.h>
0031 #include <linux/kernel.h>
0032 #include <linux/string.h>
0033 #include <linux/timer.h>
0034 #include <linux/errno.h>
0035 #include <linux/ioport.h>
0036 #include <linux/slab.h>
0037 #include <linux/interrupt.h>
0038 #include <linux/pci.h>
0039 #include <linux/netdevice.h>
0040 #include <linux/etherdevice.h>
0041 #include <linux/skbuff.h>
0042 #include <linux/init.h>
0043 #include <linux/spinlock.h>
0044 #include <linux/ethtool.h>
0045 #include <linux/delay.h>
0046 #include <linux/rtnetlink.h>
0047 #include <linux/mii.h>
0048 #include <linux/crc32.h>
0049 #include <linux/bitops.h>
0050 #include <linux/prefetch.h>
0051 #include <asm/processor.h> /* Processor type for cache alignment. */
0052 #include <asm/io.h>
0053 #include <asm/irq.h>
0054 #include <linux/uaccess.h>
0055
0056 #define DRV_NAME "natsemi"
0057 #define DRV_VERSION "2.1"
0058 #define DRV_RELDATE "Sept 11, 2006"
0059
0060 #define RX_OFFSET 2
0061
0062
0063
0064
0065
0066
0067 #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
0068 NETIF_MSG_LINK | \
0069 NETIF_MSG_WOL | \
0070 NETIF_MSG_RX_ERR | \
0071 NETIF_MSG_TX_ERR)
0072 static int debug = -1;
0073
0074 static int mtu;
0075
0076
0077
0078 static const int multicast_filter_limit = 100;
0079
0080
0081
0082 static int rx_copybreak;
0083
0084 static int dspcfg_workaround = 1;
0085
0086
0087
0088
0089
0090
0091 #define MAX_UNITS 8
0092 static int options[MAX_UNITS];
0093 static int full_duplex[MAX_UNITS];
0094
0095
0096
0097
0098
0099
0100
0101
0102 #define TX_RING_SIZE 16
0103 #define TX_QUEUE_LEN 10
0104 #define RX_RING_SIZE 32
0105
0106
0107
0108 #define TX_TIMEOUT (2*HZ)
0109
0110 #define NATSEMI_HW_TIMEOUT 400
0111 #define NATSEMI_TIMER_FREQ 5*HZ
0112 #define NATSEMI_PG0_NREGS 64
0113 #define NATSEMI_RFDR_NREGS 8
0114 #define NATSEMI_PG1_NREGS 4
0115 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
0116 NATSEMI_PG1_NREGS)
0117 #define NATSEMI_REGS_VER 1
0118 #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
0119
0120
0121
0122
0123
0124 #define NATSEMI_HEADERS 22
0125 #define NATSEMI_PADDING 16
0126 #define NATSEMI_LONGPKT 1518
0127 #define NATSEMI_RX_LIMIT 2046
0128
0129
0130 static const char version[] =
0131 KERN_INFO DRV_NAME " dp8381x driver, version "
0132 DRV_VERSION ", " DRV_RELDATE "\n"
0133 " originally by Donald Becker <becker@scyld.com>\n"
0134 " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
0135
0136 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
0137 MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
0138 MODULE_LICENSE("GPL");
0139
0140 module_param(mtu, int, 0);
0141 module_param(debug, int, 0);
0142 module_param(rx_copybreak, int, 0);
0143 module_param(dspcfg_workaround, int, 0);
0144 module_param_array(options, int, NULL, 0);
0145 module_param_array(full_duplex, int, NULL, 0);
0146 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
0147 MODULE_PARM_DESC(debug, "DP8381x default debug level");
0148 MODULE_PARM_DESC(rx_copybreak,
0149 "DP8381x copy breakpoint for copy-only-tiny-frames");
0150 MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
0151 MODULE_PARM_DESC(options,
0152 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
0153 MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228 #define PHYID_AM79C874 0x0022561b
0229
0230 enum {
0231 MII_MCTRL = 0x15,
0232 MII_FX_SEL = 0x0001,
0233 MII_EN_SCRM = 0x0004,
0234 };
0235
0236 enum {
0237 NATSEMI_FLAG_IGNORE_PHY = 0x1,
0238 };
0239
0240
0241 static struct {
0242 const char *name;
0243 unsigned long flags;
0244 unsigned int eeprom_size;
0245 } natsemi_pci_info[] = {
0246 { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
0247 { "NatSemi DP8381[56]", 0, 24 },
0248 };
0249
0250 static const struct pci_device_id natsemi_pci_tbl[] = {
0251 { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
0252 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
0253 { }
0254 };
0255 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
0256
0257
0258
0259
0260
0261
0262 enum register_offsets {
0263 ChipCmd = 0x00,
0264 ChipConfig = 0x04,
0265 EECtrl = 0x08,
0266 PCIBusCfg = 0x0C,
0267 IntrStatus = 0x10,
0268 IntrMask = 0x14,
0269 IntrEnable = 0x18,
0270 IntrHoldoff = 0x1C,
0271 TxRingPtr = 0x20,
0272 TxConfig = 0x24,
0273 RxRingPtr = 0x30,
0274 RxConfig = 0x34,
0275 ClkRun = 0x3C,
0276 WOLCmd = 0x40,
0277 PauseCmd = 0x44,
0278 RxFilterAddr = 0x48,
0279 RxFilterData = 0x4C,
0280 BootRomAddr = 0x50,
0281 BootRomData = 0x54,
0282 SiliconRev = 0x58,
0283 StatsCtrl = 0x5C,
0284 StatsData = 0x60,
0285 RxPktErrs = 0x60,
0286 RxMissed = 0x68,
0287 RxCRCErrs = 0x64,
0288 BasicControl = 0x80,
0289 BasicStatus = 0x84,
0290 AnegAdv = 0x90,
0291 AnegPeer = 0x94,
0292 PhyStatus = 0xC0,
0293 MIntrCtrl = 0xC4,
0294 MIntrStatus = 0xC8,
0295 PhyCtrl = 0xE4,
0296
0297
0298
0299 PGSEL = 0xCC,
0300 PMDCSR = 0xE4,
0301 TSTDAT = 0xFC,
0302 DSPCFG = 0xF4,
0303 SDCFG = 0xF8
0304 };
0305
0306 #define PMDCSR_VAL 0x189c
0307 #define TSTDAT_VAL 0x0
0308 #define DSPCFG_VAL 0x5040
0309 #define SDCFG_VAL 0x008c
0310 #define DSPCFG_LOCK 0x20
0311 #define DSPCFG_COEF 0x1000
0312 #define TSTDAT_FIXED 0xe8
0313
0314
0315 enum pci_register_offsets {
0316 PCIPM = 0x44,
0317 };
0318
0319 enum ChipCmd_bits {
0320 ChipReset = 0x100,
0321 RxReset = 0x20,
0322 TxReset = 0x10,
0323 RxOff = 0x08,
0324 RxOn = 0x04,
0325 TxOff = 0x02,
0326 TxOn = 0x01,
0327 };
0328
0329 enum ChipConfig_bits {
0330 CfgPhyDis = 0x200,
0331 CfgPhyRst = 0x400,
0332 CfgExtPhy = 0x1000,
0333 CfgAnegEnable = 0x2000,
0334 CfgAneg100 = 0x4000,
0335 CfgAnegFull = 0x8000,
0336 CfgAnegDone = 0x8000000,
0337 CfgFullDuplex = 0x20000000,
0338 CfgSpeed100 = 0x40000000,
0339 CfgLink = 0x80000000,
0340 };
0341
0342 enum EECtrl_bits {
0343 EE_ShiftClk = 0x04,
0344 EE_DataIn = 0x01,
0345 EE_ChipSelect = 0x08,
0346 EE_DataOut = 0x02,
0347 MII_Data = 0x10,
0348 MII_Write = 0x20,
0349 MII_ShiftClk = 0x40,
0350 };
0351
0352 enum PCIBusCfg_bits {
0353 EepromReload = 0x4,
0354 };
0355
0356
0357 enum IntrStatus_bits {
0358 IntrRxDone = 0x0001,
0359 IntrRxIntr = 0x0002,
0360 IntrRxErr = 0x0004,
0361 IntrRxEarly = 0x0008,
0362 IntrRxIdle = 0x0010,
0363 IntrRxOverrun = 0x0020,
0364 IntrTxDone = 0x0040,
0365 IntrTxIntr = 0x0080,
0366 IntrTxErr = 0x0100,
0367 IntrTxIdle = 0x0200,
0368 IntrTxUnderrun = 0x0400,
0369 StatsMax = 0x0800,
0370 SWInt = 0x1000,
0371 WOLPkt = 0x2000,
0372 LinkChange = 0x4000,
0373 IntrHighBits = 0x8000,
0374 RxStatusFIFOOver = 0x10000,
0375 IntrPCIErr = 0xf00000,
0376 RxResetDone = 0x1000000,
0377 TxResetDone = 0x2000000,
0378 IntrAbnormalSummary = 0xCD20,
0379 };
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390 #define DEFAULT_INTR 0x00f1cd65
0391
0392 enum TxConfig_bits {
0393 TxDrthMask = 0x3f,
0394 TxFlthMask = 0x3f00,
0395 TxMxdmaMask = 0x700000,
0396 TxMxdma_512 = 0x0,
0397 TxMxdma_4 = 0x100000,
0398 TxMxdma_8 = 0x200000,
0399 TxMxdma_16 = 0x300000,
0400 TxMxdma_32 = 0x400000,
0401 TxMxdma_64 = 0x500000,
0402 TxMxdma_128 = 0x600000,
0403 TxMxdma_256 = 0x700000,
0404 TxCollRetry = 0x800000,
0405 TxAutoPad = 0x10000000,
0406 TxMacLoop = 0x20000000,
0407 TxHeartIgn = 0x40000000,
0408 TxCarrierIgn = 0x80000000
0409 };
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422 #define TX_FLTH_VAL ((512/32) << 8)
0423 #define TX_DRTH_VAL_START (64/32)
0424 #define TX_DRTH_VAL_INC 2
0425 #define TX_DRTH_VAL_LIMIT (1472/32)
0426
0427 enum RxConfig_bits {
0428 RxDrthMask = 0x3e,
0429 RxMxdmaMask = 0x700000,
0430 RxMxdma_512 = 0x0,
0431 RxMxdma_4 = 0x100000,
0432 RxMxdma_8 = 0x200000,
0433 RxMxdma_16 = 0x300000,
0434 RxMxdma_32 = 0x400000,
0435 RxMxdma_64 = 0x500000,
0436 RxMxdma_128 = 0x600000,
0437 RxMxdma_256 = 0x700000,
0438 RxAcceptLong = 0x8000000,
0439 RxAcceptTx = 0x10000000,
0440 RxAcceptRunt = 0x40000000,
0441 RxAcceptErr = 0x80000000
0442 };
0443 #define RX_DRTH_VAL (128/8)
0444
0445 enum ClkRun_bits {
0446 PMEEnable = 0x100,
0447 PMEStatus = 0x8000,
0448 };
0449
0450 enum WolCmd_bits {
0451 WakePhy = 0x1,
0452 WakeUnicast = 0x2,
0453 WakeMulticast = 0x4,
0454 WakeBroadcast = 0x8,
0455 WakeArp = 0x10,
0456 WakePMatch0 = 0x20,
0457 WakePMatch1 = 0x40,
0458 WakePMatch2 = 0x80,
0459 WakePMatch3 = 0x100,
0460 WakeMagic = 0x200,
0461 WakeMagicSecure = 0x400,
0462 SecureHack = 0x100000,
0463 WokePhy = 0x400000,
0464 WokeUnicast = 0x800000,
0465 WokeMulticast = 0x1000000,
0466 WokeBroadcast = 0x2000000,
0467 WokeArp = 0x4000000,
0468 WokePMatch0 = 0x8000000,
0469 WokePMatch1 = 0x10000000,
0470 WokePMatch2 = 0x20000000,
0471 WokePMatch3 = 0x40000000,
0472 WokeMagic = 0x80000000,
0473 WakeOptsSummary = 0x7ff
0474 };
0475
0476 enum RxFilterAddr_bits {
0477 RFCRAddressMask = 0x3ff,
0478 AcceptMulticast = 0x00200000,
0479 AcceptMyPhys = 0x08000000,
0480 AcceptAllPhys = 0x10000000,
0481 AcceptAllMulticast = 0x20000000,
0482 AcceptBroadcast = 0x40000000,
0483 RxFilterEnable = 0x80000000
0484 };
0485
0486 enum StatsCtrl_bits {
0487 StatsWarn = 0x1,
0488 StatsFreeze = 0x2,
0489 StatsClear = 0x4,
0490 StatsStrobe = 0x8,
0491 };
0492
0493 enum MIntrCtrl_bits {
0494 MICRIntEn = 0x2,
0495 };
0496
0497 enum PhyCtrl_bits {
0498 PhyAddrMask = 0x1f,
0499 };
0500
0501 #define PHY_ADDR_NONE 32
0502 #define PHY_ADDR_INTERNAL 1
0503
0504
0505 #define SRR_DP83815_C 0x0302
0506 #define SRR_DP83815_D 0x0403
0507 #define SRR_DP83816_A4 0x0504
0508 #define SRR_DP83816_A5 0x0505
0509
0510
0511
0512
/* The DP8381x DMA descriptor: all fields are 32-bit little-endian as
 * required by the chip; using only 32 bit fields also simplifies
 * big-endian host support. */
struct netdev_desc {
	__le32 next_desc;	/* bus address of the next descriptor */
	__le32 cmd_status;	/* DescOwn, status/command bits, length */
	__le32 addr;		/* bus address of the packet buffer */
	__le32 software_use;	/* never touched by the hardware */
};
0519
0520
/* Bits in netdev_desc.cmd_status: a shared set, then Tx-only and
 * Rx-only error/status bits that overlap in the same positions. */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	/* Tx-only status bits */
	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	/* Rx-only status bits */
	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};
0537
/* Per-device driver state, stored in the net_device private area. */
struct netdev_private {
	/* Descriptor rings: both live in one DMA-coherent allocation. */
	dma_addr_t ring_dma;		/* bus address of that allocation */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs and their mappings. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* Sent-in-place packets/buffers, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;		/* mapped chip registers (BAR1) */
	struct napi_struct napi;
	/* Media-monitoring timer. */
	struct timer_list timer;

	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices. */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU + slack. */
	unsigned int rx_buf_sz;
	int oom;			/* set when an rx refill failed */
	/* Interrupt status saved for the NAPI poll handler. */
	u32 intr_status;
	/* When set, do not touch the nic registers at all. */
	int hands_off;
	/* Don't pay attention to the reported link state (Aculab card). */
	int ignore_phy;
	/* External PHY: only valid if dev->if_port != PORT_TP. */
	int mii;			/* PHY id (PHYSID1 << 16 | PHYSID2) */
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter state. */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds. */
	u32 tx_config, rx_config;
	/* Original contents of ClkRun register. */
	u32 SavedClkRun;
	/* Silicon revision (SiliconRev register). */
	u32 srr;
	/* Expected DSPCFG value, for the init_phy_fixup() workaround. */
	u16 dspcfg;
	int dspcfg_workaround;
	/* Media parameters saved in ethtool format. */
	u16 speed;			/* forced speed: 10 or 100 Mb */
	u8 duplex;			/* half or full */
	u8 autoneg;			/* autonegotiation enabled */
	/* MII transceiver section. */
	u16 advertising;		/* NWay media advertisement */
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;
	/* EEPROM size, from natsemi_pci_info[]. */
	int eeprom_size;
};
0597
0598 static void move_int_phy(struct net_device *dev, int addr);
0599 static int eeprom_read(void __iomem *ioaddr, int location);
0600 static int mdio_read(struct net_device *dev, int reg);
0601 static void mdio_write(struct net_device *dev, int reg, u16 data);
0602 static void init_phy_fixup(struct net_device *dev);
0603 static int miiport_read(struct net_device *dev, int phy_id, int reg);
0604 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
0605 static int find_mii(struct net_device *dev);
0606 static void natsemi_reset(struct net_device *dev);
0607 static void natsemi_reload_eeprom(struct net_device *dev);
0608 static void natsemi_stop_rxtx(struct net_device *dev);
0609 static int netdev_open(struct net_device *dev);
0610 static void do_cable_magic(struct net_device *dev);
0611 static void undo_cable_magic(struct net_device *dev);
0612 static void check_link(struct net_device *dev);
0613 static void netdev_timer(struct timer_list *t);
0614 static void dump_ring(struct net_device *dev);
0615 static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
0616 static int alloc_ring(struct net_device *dev);
0617 static void refill_rx(struct net_device *dev);
0618 static void init_ring(struct net_device *dev);
0619 static void drain_tx(struct net_device *dev);
0620 static void drain_ring(struct net_device *dev);
0621 static void free_ring(struct net_device *dev);
0622 static void reinit_ring(struct net_device *dev);
0623 static void init_registers(struct net_device *dev);
0624 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
0625 static irqreturn_t intr_handler(int irq, void *dev_instance);
0626 static void netdev_error(struct net_device *dev, int intr_status);
0627 static int natsemi_poll(struct napi_struct *napi, int budget);
0628 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
0629 static void netdev_tx_done(struct net_device *dev);
0630 static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
0631 #ifdef CONFIG_NET_POLL_CONTROLLER
0632 static void natsemi_poll_controller(struct net_device *dev);
0633 #endif
0634 static void __set_rx_mode(struct net_device *dev);
0635 static void set_rx_mode(struct net_device *dev);
0636 static void __get_stats(struct net_device *dev);
0637 static struct net_device_stats *get_stats(struct net_device *dev);
0638 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
0639 static int netdev_set_wol(struct net_device *dev, u32 newval);
0640 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
0641 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
0642 static int netdev_get_sopass(struct net_device *dev, u8 *data);
0643 static int netdev_get_ecmd(struct net_device *dev,
0644 struct ethtool_link_ksettings *ecmd);
0645 static int netdev_set_ecmd(struct net_device *dev,
0646 const struct ethtool_link_ksettings *ecmd);
0647 static void enable_wol_mode(struct net_device *dev, int enable_intr);
0648 static int netdev_close(struct net_device *dev);
0649 static int netdev_get_regs(struct net_device *dev, u8 *buf);
0650 static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
0651 static const struct ethtool_ops ethtool_ops;
0652
0653 #define NATSEMI_ATTR(_name) \
0654 static ssize_t natsemi_show_##_name(struct device *dev, \
0655 struct device_attribute *attr, char *buf); \
0656 static ssize_t natsemi_set_##_name(struct device *dev, \
0657 struct device_attribute *attr, \
0658 const char *buf, size_t count); \
0659 static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
0660
0661 #define NATSEMI_CREATE_FILE(_dev, _name) \
0662 device_create_file(&_dev->dev, &dev_attr_##_name)
0663 #define NATSEMI_REMOVE_FILE(_dev, _name) \
0664 device_remove_file(&_dev->dev, &dev_attr_##_name)
0665
0666 NATSEMI_ATTR(dspcfg_workaround);
0667
0668 static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
0669 struct device_attribute *attr,
0670 char *buf)
0671 {
0672 struct netdev_private *np = netdev_priv(to_net_dev(dev));
0673
0674 return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
0675 }
0676
/* sysfs "store" callback: accept "on"/"1" or "off"/"0" to toggle the
 * DSPCFG workaround.  Unrecognized input is silently ignored (the write
 * still reports success by returning count). */
static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	/* NOTE(review): count - 1 assumes a non-empty buffer with a
	 * trailing newline; count == 0 would underflow the size_t.  sysfs
	 * never passes an empty write here, but confirm before reusing
	 * this parsing pattern elsewhere. */
	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1) ||
		 !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	/* Serialize against paths that read the flag under np->lock. */
	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}
0702
0703 static inline void __iomem *ns_ioaddr(struct net_device *dev)
0704 {
0705 struct netdev_private *np = netdev_priv(dev);
0706
0707 return np->ioaddr;
0708 }
0709
0710 static inline void natsemi_irq_enable(struct net_device *dev)
0711 {
0712 writel(1, ns_ioaddr(dev) + IntrEnable);
0713 readl(ns_ioaddr(dev) + IntrEnable);
0714 }
0715
0716 static inline void natsemi_irq_disable(struct net_device *dev)
0717 {
0718 writel(0, ns_ioaddr(dev) + IntrEnable);
0719 readl(ns_ioaddr(dev) + IntrEnable);
0720 }
0721
/*
 * move_int_phy - park the internal PHY at an address that is not in use
 * @dev: network device
 * @addr: MII address that must stay free (the one about to be probed)
 *
 * The internal transceiver still answers on the management bus even when
 * an external PHY is selected, so reprogram its address (via PhyCtrl) to
 * one that collides with neither @addr nor the configured external PHY.
 */
static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

	/* Start at the top of the address range and step down past any
	 * conflicts; at most two decrements are ever needed. */
	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);	/* flush the posted write */
	udelay(1);
}
0746
/*
 * natsemi_init_media - snapshot the transceiver's current media settings
 *
 * Reads BMCR/ADVERTISE through mdio_read() and caches speed, duplex,
 * autoneg state and the advertised abilities in netdev_private so later
 * link handling can restore them after resets.  Carrier starts "on"
 * only when the PHY is being ignored.
 */
static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp = mdio_read(dev, MII_BMCR);
	np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
	np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
	np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
	np->advertising= mdio_read(dev, MII_ADVERTISE);

	/* Warn when the transceiver defaults to less than the full set of
	 * 10/100 half/full modes. */
	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
			"enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
			"0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
			"full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);

}
0785
/* Standard net_device callbacks for this driver. */
static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open = netdev_open,
	.ndo_stop = netdev_close,
	.ndo_start_xmit = start_tx,
	.ndo_get_stats = get_stats,
	.ndo_set_rx_mode = set_rx_mode,
	.ndo_change_mtu = natsemi_change_mtu,
	.ndo_eth_ioctl = netdev_ioctl,
	.ndo_tx_timeout = ns_tx_timeout,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = natsemi_poll_controller,
#endif
};
0801
0802 static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
0803 {
0804 struct net_device *dev;
0805 struct netdev_private *np;
0806 int i, option, irq, chip_idx = ent->driver_data;
0807 static int find_cnt = -1;
0808 resource_size_t iostart;
0809 unsigned long iosize;
0810 void __iomem *ioaddr;
0811 const int pcibar = 1;
0812 u8 addr[ETH_ALEN];
0813 int prev_eedata;
0814 u32 tmp;
0815
0816
0817 #ifndef MODULE
0818 static int printed_version;
0819 if (!printed_version++)
0820 printk(version);
0821 #endif
0822
0823 i = pcim_enable_device(pdev);
0824 if (i) return i;
0825
0826
0827
0828
0829
0830 pci_read_config_dword(pdev, PCIPM, &tmp);
0831 if (tmp & PCI_PM_CTRL_STATE_MASK) {
0832
0833 u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
0834 pci_write_config_dword(pdev, PCIPM, newtmp);
0835 }
0836
0837 find_cnt++;
0838 iostart = pci_resource_start(pdev, pcibar);
0839 iosize = pci_resource_len(pdev, pcibar);
0840 irq = pdev->irq;
0841
0842 pci_set_master(pdev);
0843
0844 dev = alloc_etherdev(sizeof (struct netdev_private));
0845 if (!dev)
0846 return -ENOMEM;
0847 SET_NETDEV_DEV(dev, &pdev->dev);
0848
0849 i = pci_request_regions(pdev, DRV_NAME);
0850 if (i)
0851 goto err_pci_request_regions;
0852
0853 ioaddr = ioremap(iostart, iosize);
0854 if (!ioaddr) {
0855 i = -ENOMEM;
0856 goto err_pci_request_regions;
0857 }
0858
0859
0860 prev_eedata = eeprom_read(ioaddr, 6);
0861 for (i = 0; i < 3; i++) {
0862 int eedata = eeprom_read(ioaddr, i + 7);
0863 addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
0864 addr[i*2+1] = eedata >> 7;
0865 prev_eedata = eedata;
0866 }
0867 eth_hw_addr_set(dev, addr);
0868
0869 np = netdev_priv(dev);
0870 np->ioaddr = ioaddr;
0871
0872 netif_napi_add(dev, &np->napi, natsemi_poll, 64);
0873 np->dev = dev;
0874
0875 np->pci_dev = pdev;
0876 pci_set_drvdata(pdev, dev);
0877 np->iosize = iosize;
0878 spin_lock_init(&np->lock);
0879 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
0880 np->hands_off = 0;
0881 np->intr_status = 0;
0882 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
0883 if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
0884 np->ignore_phy = 1;
0885 else
0886 np->ignore_phy = 0;
0887 np->dspcfg_workaround = dspcfg_workaround;
0888
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898 if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
0899 dev->if_port = PORT_MII;
0900 else
0901 dev->if_port = PORT_TP;
0902
0903 natsemi_reload_eeprom(dev);
0904 natsemi_reset(dev);
0905
0906 if (dev->if_port != PORT_TP) {
0907 np->phy_addr_external = find_mii(dev);
0908
0909
0910 if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
0911 dev->if_port = PORT_TP;
0912 np->phy_addr_external = PHY_ADDR_INTERNAL;
0913 }
0914 } else {
0915 np->phy_addr_external = PHY_ADDR_INTERNAL;
0916 }
0917
0918 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
0919
0920 if (option) {
0921 if (option & 0x200)
0922 np->full_duplex = 1;
0923 if (option & 15)
0924 printk(KERN_INFO
0925 "natsemi %s: ignoring user supplied media type %d",
0926 pci_name(np->pci_dev), option & 15);
0927 }
0928 if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
0929 np->full_duplex = 1;
0930
0931 dev->netdev_ops = &natsemi_netdev_ops;
0932 dev->watchdog_timeo = TX_TIMEOUT;
0933
0934 dev->ethtool_ops = ðtool_ops;
0935
0936
0937 dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
0938 dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;
0939
0940 if (mtu)
0941 dev->mtu = mtu;
0942
0943 natsemi_init_media(dev);
0944
0945
0946 np->srr = readl(ioaddr + SiliconRev);
0947 if (netif_msg_hw(np))
0948 printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
0949 pci_name(np->pci_dev), np->srr);
0950
0951 i = register_netdev(dev);
0952 if (i)
0953 goto err_register_netdev;
0954 i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
0955 if (i)
0956 goto err_create_file;
0957
0958 if (netif_msg_drv(np)) {
0959 printk(KERN_INFO "natsemi %s: %s at %#08llx "
0960 "(%s), %pM, IRQ %d",
0961 dev->name, natsemi_pci_info[chip_idx].name,
0962 (unsigned long long)iostart, pci_name(np->pci_dev),
0963 dev->dev_addr, irq);
0964 if (dev->if_port == PORT_TP)
0965 printk(", port TP.\n");
0966 else if (np->ignore_phy)
0967 printk(", port MII, ignoring PHY\n");
0968 else
0969 printk(", port MII, phy ad %d.\n", np->phy_addr_external);
0970 }
0971 return 0;
0972
0973 err_create_file:
0974 unregister_netdev(dev);
0975
0976 err_register_netdev:
0977 iounmap(ioaddr);
0978
0979 err_pci_request_regions:
0980 free_netdev(dev);
0981 return i;
0982 }
0983
0984
0985
0986
0987
0988
0989
0990
0991
0992
0993 #define eeprom_delay(ee_addr) readl(ee_addr)
0994
0995 #define EE_Write0 (EE_ChipSelect)
0996 #define EE_Write1 (EE_ChipSelect | EE_DataIn)
0997
0998
0999 enum EEPROM_Cmds {
1000 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
1001 };
1002
/*
 * eeprom_read - bit-bang one 16-bit word out of the serial EEPROM
 * @addr: mapped register base (the EEPROM port is at addr + EECtrl)
 * @location: word address to read (OR'd into the 93c06/46-style command)
 *
 * Clocks the read command out MSB first, then clocks 16 data bits in,
 * filling bit i of the result on the i-th clock.  Each eeprom_delay()
 * is a register read that doubles as the required settling time.
 */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	writel(EE_Write0, ee_addr);	/* select the chip, data low */

	/* Shift the read command bits out, MSB first. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		writel(dataval, ee_addr);
		eeprom_delay(ee_addr);
		writel(dataval | EE_ShiftClk, ee_addr);	/* latch on rising edge */
		eeprom_delay(ee_addr);
	}
	writel(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock the 16 data bits in. */
	for (i = 0; i < 16; i++) {
		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
		writel(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_Write0, ee_addr);
	writel(0, ee_addr);
	return retval;
}
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046 #define mii_delay(ioaddr) readl(ioaddr + EECtrl)
1047
/* Clock one bit in from the software MDIO port: raise the clock, sample
 * MII_Data, drop the clock again. */
static int mii_getbit (struct net_device *dev)
{
	int data;
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(MII_ShiftClk, ioaddr + EECtrl);	/* clock high */
	data = readl(ioaddr + EECtrl);		/* sample the data line */
	writel(0, ioaddr + EECtrl);		/* clock low */
	mii_delay(ioaddr);
	return (data & MII_Data)? 1 : 0;
}
1059
1060 static void mii_send_bits (struct net_device *dev, u32 data, int len)
1061 {
1062 u32 i;
1063 void __iomem *ioaddr = ns_ioaddr(dev);
1064
1065 for (i = (1 << (len-1)); i; i >>= 1)
1066 {
1067 u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1068 writel(mdio_val, ioaddr + EECtrl);
1069 mii_delay(ioaddr);
1070 writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1071 mii_delay(ioaddr);
1072 }
1073 writel(0, ioaddr + EECtrl);
1074 mii_delay(ioaddr);
1075 }
1076
/*
 * miiport_read - read a register from an external PHY via software MDIO
 * @dev: network device
 * @phy_id: 5-bit PHY address on the management bus
 * @reg: 5-bit register number
 *
 * Implements an IEEE 802.3 clause-22 read frame by bit-banging.
 * Returns the 16-bit register value, or 0 if the PHY did not drive the
 * turnaround bit low (no device responding at @phy_id).
 */
static int miiport_read(struct net_device *dev, int phy_id, int reg)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble: 32 ones to synchronize the PHY's management logic. */
	mii_send_bits (dev, 0xffffffff, 32);

	/* ST(01) + OP(10, read) + phy_id + reg = 14 bits; the turnaround
	 * is handled separately below. */
	cmd = (0x06 << 10) | (phy_id << 5) | reg;
	mii_send_bits (dev, cmd, 14);

	/* Turnaround: a responding PHY drives this bit to zero. */
	if (mii_getbit (dev))
		return 0;

	/* 16 data bits, MSB first. */
	for (i = 0; i < 16; i++) {
		retval <<= 1;
		retval |= mii_getbit (dev);
	}

	/* One extra clock to release the bus. */
	mii_getbit (dev);
	return retval;
}
1101
/*
 * miiport_write - write @data to external PHY register @reg via MDIO
 *
 * Sends an IEEE 802.3 clause-22 write frame: the 0x5002 constant holds
 * ST(01) + OP(01, write) in the top bits and TA(10) in bits 17:16, with
 * phy_id and reg OR'd into their field positions.
 */
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
{
	u32 cmd;

	/* Preamble: 32 ones to synchronize. */
	mii_send_bits (dev, 0xffffffff, 32);

	/* The full 32-bit frame: ST + OP + phy_id + reg + TA + 16 data bits. */
	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
	mii_send_bits (dev, cmd, 32);

	/* One extra clock to release the bus. */
	mii_getbit (dev);
}
1115
1116 static int mdio_read(struct net_device *dev, int reg)
1117 {
1118 struct netdev_private *np = netdev_priv(dev);
1119 void __iomem *ioaddr = ns_ioaddr(dev);
1120
1121
1122
1123
1124
1125 if (dev->if_port == PORT_TP)
1126 return readw(ioaddr+BasicControl+(reg<<2));
1127 else
1128 return miiport_read(dev, np->phy_addr_external, reg);
1129 }
1130
1131 static void mdio_write(struct net_device *dev, int reg, u16 data)
1132 {
1133 struct netdev_private *np = netdev_priv(dev);
1134 void __iomem *ioaddr = ns_ioaddr(dev);
1135
1136
1137 if (dev->if_port == PORT_TP)
1138 writew(data, ioaddr+BasicControl+(reg<<2));
1139 else
1140 miiport_write(dev, np->phy_addr_external, reg, data);
1141 }
1142
/*
 * init_phy_fixup - (re)program the transceiver after a reset
 *
 * Restores the cached autoneg/speed/duplex settings to BMCR, applies a
 * PHY-specific quirk (AM79C874 fiber/scrambler select), and — for the
 * internal transceiver only — rewrites the undocumented DSPCFG page-1
 * registers until the chip accepts the value (the well-known DP83815
 * DSPCFG workaround).  Finally enables the MII interrupt.
 */
static void init_phy_fixup(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 tmp;

	/* restore stuff lost when power was out */
	tmp = mdio_read(dev, MII_BMCR);
	if (np->autoneg == AUTONEG_ENABLE) {
		/* renegotiate if something changed */
		if ((tmp & BMCR_ANENABLE) == 0 ||
		    np->advertising != mdio_read(dev, MII_ADVERTISE))
		{
			/* turn on autonegotiation and force negotiation */
			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mdio_write(dev, MII_ADVERTISE, np->advertising);
		}
	} else {
		/* turn off autonegotiation, set speed and duplexity */
		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
		if (np->speed == SPEED_100)
			tmp |= BMCR_SPEED100;
		if (np->duplex == DUPLEX_FULL)
			tmp |= BMCR_FULLDPLX;
	}
	mdio_write(dev, MII_BMCR, tmp);
	readl(ioaddr + ChipConfig);	/* flush */
	udelay(1);

	/* find out what phy this is */
	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);

	/* handle external phys here */
	switch (np->mii) {
	case PHYID_AM79C874:
		/* phy specific configuration for fibre/tp operation */
		tmp = mdio_read(dev, MII_MCTRL);
		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
		if (dev->if_port == PORT_FIBRE)
			tmp |= MII_FX_SEL;
		else
			tmp |= MII_EN_SCRM;
		mdio_write(dev, MII_MCTRL, tmp);
		break;
	default:
		break;
	}
	cfg = readl(ioaddr + ChipConfig);
	/* The rest applies only to the internal transceiver. */
	if (cfg & CfgExtPhy)
		return;

	/* DSPCFG workaround: write the magic page-1 values, then read
	 * DSPCFG back and retry until the chip retains what we wrote. */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {

		int dspcfg;
		writew(1, ioaddr + PGSEL);	/* select register page 1 */
		writew(PMDCSR_VAL, ioaddr + PMDCSR);
		writew(TSTDAT_VAL, ioaddr + TSTDAT);
		/* DP83815 rev C and earlier need a fixed DSPCFG value;
		 * later chips keep their coefficient bits. */
		np->dspcfg = (np->srr <= SRR_DP83815_C)?
			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
		writew(np->dspcfg, ioaddr + DSPCFG);
		writew(SDCFG_VAL, ioaddr + SDCFG);
		writew(0, ioaddr + PGSEL);	/* back to page 0 */
		readl(ioaddr + ChipConfig);
		udelay(10);

		writew(1, ioaddr + PGSEL);
		dspcfg = readw(ioaddr + DSPCFG);
		writew(0, ioaddr + PGSEL);
		if (np->dspcfg == dspcfg)
			break;
	}

	if (netif_msg_link(np)) {
		if (i==NATSEMI_HW_TIMEOUT) {
			printk(KERN_INFO
				"%s: DSPCFG mismatch after retrying for %d usec.\n",
				dev->name, i*10);
		} else {
			printk(KERN_INFO
				"%s: DSPCFG accepted after %d usec.\n",
				dev->name, i*10);
		}
	}

	/* Enable PHY interrupts: clear any pending status first, then
	 * turn the interrupt on. */
	readw(ioaddr + MIntrStatus);
	writew(MICRIntEn, ioaddr + MIntrCtrl);
}
1255
/*
 * switch_port_external - route the MAC to the external MII transceiver
 *
 * Returns 0 if already in external mode, 1 if a switch was performed
 * (callers use this to know whether to switch back afterwards).
 */
static int switch_port_external(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 cfg;

	cfg = readl(ioaddr + ChipConfig);
	if (cfg & CfgExtPhy)
		return 0;	/* nothing to do */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",
				dev->name);
	}

	/* Select external MII and power down the internal transceiver. */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush */
	udelay(1);

	/* Park the internal PHY away from the external PHY address, then
	 * reprogram the external transceiver. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);

	return 1;
}
1288
/*
 * switch_port_internal - route the MAC back to the internal transceiver
 *
 * Returns 0 if already in internal mode, 1 if a switch was performed.
 * The internal PHY is reset and reprogrammed on the way back.
 */
static int switch_port_internal(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;
	u32 cfg;
	u16 bmcr;

	cfg = readl(ioaddr + ChipConfig);
	if (!(cfg &CfgExtPhy))
		return 0;	/* nothing to do */

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",
				dev->name);
	}

	/* 1) switch back to the internal phy */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush */
	udelay(1);

	/* 2) reset the internal phy and wait for the reset bit to clear */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	udelay(10);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))
			break;
		udelay(10);
	}
	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		printk(KERN_INFO
			"%s: phy reset did not complete in %d usec.\n",
			dev->name, i*10);
	}
	/* 3) reprogram the freshly reset transceiver */
	init_phy_fixup(dev);

	return 1;
}
1332
1333
1334
1335
1336
1337
1338
1339
1340
/*
 * find_mii - scan the MII bus for an external PHY
 *
 * Temporarily switches the port to external mode, probes addresses
 * 1..31 (address 0 is skipped), and switches back if it switched.
 * Returns the address of the first responding PHY, or 32
 * (== PHY_ADDR_NONE) when the loop completes without finding one.
 */
static int find_mii(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int tmp;
	int i;
	int did_switch;

	/* Switch to external PHY mode so miiport_read() talks to the bus
	 * instead of the internal transceiver. */
	did_switch = switch_port_external(dev);

	/* A BMSR of all-ones or all-zeros means nothing answered at that
	 * address.  The internal PHY is parked out of the way before each
	 * probe so it cannot shadow the address being tested. */
	for (i = 1; i <= 31; i++) {
		move_int_phy(dev, i);
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
					+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);
			}
			break;
		}
	}
	/* Restore the port mode if we changed it. */
	if (did_switch)
		switch_port_internal(dev);
	return i;
}
1376
1377
1378 #define CFG_RESET_SAVE 0xfde000
1379
1380 #define WCSR_RESET_SAVE 0x61f
1381
1382 #define RFCR_RESET_SAVE 0xf8500000
1383
/*
 * Soft-reset the chip while preserving the configuration a reset
 * would otherwise clobber: selected ChipConfig bits, the
 * wake-on-LAN command bits, the receive-filter control bits, and the
 * perfect-match MAC and SecureOn-password filter entries.
 */
static void natsemi_reset(struct net_device *dev)
{
	int i;
	u32 cfg;
	u32 wcsr;
	u32 rfcr;
	u16 pmatch[3];	/* perfect-match MAC address, 16 bits at a time */
	u16 sopass[3];	/* SecureOn password, 16 bits at a time */
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	/* Save the register state that is lost across a chip reset. */
	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* Perfect-match entries live at RX filter offsets 0, 2, 4. */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);
	}
	/* SecureOn password entries live at offsets 0xa, 0xc, 0xe. */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);
	}

	/* Kick off the reset and poll for the self-clearing bit. */
	writel(ChipReset, ioaddr + ChipCmd);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
			dev->name, i*5);
	}

	/* Merge the saved bits back on top of the post-reset defaults. */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* Re-select the transceiver that matches the current port. */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
	else
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);

	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);

	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* Rewrite the perfect-match entries and the SecureOn password. */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	}
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	}
	/* Restore the filter control word last. */
	writel(rfcr, ioaddr + RxFilterAddr);
}
1460
/*
 * Reset only the receive side of the chip and wait for the
 * RxResetDone interrupt status bit. Collected status bits are
 * accumulated into np->intr_status so they are not lost for the
 * interrupt path.
 */
static void reset_rx(struct net_device *dev)
{
	int i;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	/* Poll IntrStatus until the chip reports the RX reset done. */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)
			break;
		udelay(15);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
		       dev->name, i*15);
	} else if (netif_msg_hw(np)) {
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
		       dev->name, i*15);
	}
}
1485
/*
 * Trigger a reload of the chip's configuration from the EEPROM and
 * wait for the self-clearing EepromReload bit to drop.
 */
static void natsemi_reload_eeprom(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int i;

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		udelay(50);	/* delay first: the reload is never instant */
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
			break;
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	}
}
1506
/*
 * Command both DMA engines off and poll until the chip reports that
 * neither the transmitter nor the receiver is still running.
 */
static void natsemi_stop_rxtx(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	int i;

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
			break;
		udelay(5);
	}
	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
			dev->name, i*5);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
			dev->name, i*5);
	}
}
1527
/*
 * ndo_open: bring the interface up. Resets the chip, grabs the IRQ,
 * allocates and initializes the descriptor rings, programs the MAC
 * address into the RX filter, starts the queue and arms the
 * housekeeping timer.
 * Returns 0 on success or a negative errno.
 */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;
	int i;

	/* Start with a clean chip state. */
	natsemi_reset(dev);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i) return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, irq);
	i = alloc_ring(dev);
	if (i < 0) {
		free_irq(irq, dev);	/* undo the request_irq above */
		return i;
	}
	napi_enable(&np->napi);

	init_ring(dev);
	spin_lock_irq(&np->lock);
	init_registers(dev);
	/*
	 * Program the station address into the perfect-match RX filter,
	 * one 16-bit little-endian word at a time.
	 */
	for (i = 0; i < 3; i++) {
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	}
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Arm the periodic link/DSPCFG watchdog timer. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
	add_timer(&np->timer);

	return 0;
}
1577
1578 static void do_cable_magic(struct net_device *dev)
1579 {
1580 struct netdev_private *np = netdev_priv(dev);
1581 void __iomem *ioaddr = ns_ioaddr(dev);
1582
1583 if (dev->if_port != PORT_TP)
1584 return;
1585
1586 if (np->srr >= SRR_DP83816_A5)
1587 return;
1588
1589
1590
1591
1592
1593
1594
1595 if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1596 u16 data;
1597
1598 writew(1, ioaddr + PGSEL);
1599
1600
1601
1602
1603 data = readw(ioaddr + TSTDAT) & 0xff;
1604
1605
1606
1607
1608 if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1609 np = netdev_priv(dev);
1610
1611
1612 writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1613
1614 data = readw(ioaddr + DSPCFG);
1615 np->dspcfg = data | DSPCFG_LOCK;
1616 writew(np->dspcfg, ioaddr + DSPCFG);
1617 }
1618 writew(0, ioaddr + PGSEL);
1619 }
1620 }
1621
/*
 * Revert do_cable_magic(): clear the DSPCFG_LOCK bit so the PHY's DSP
 * may adapt its coefficients again. No-op on the external port and on
 * srr >= SRR_DP83816_A5.
 */
static void undo_cable_magic(struct net_device *dev)
{
	u16 data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)
		return;

	if (np->srr >= SRR_DP83816_A5)
		return;	/* fixed in hardware from revision A5 on */

	/* Select PHY register page 1, unlock DSPCFG, back to page 0. */
	writew(1, ioaddr + PGSEL);
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);
}
1641
/*
 * Check link state via the PHY's BMSR, update the carrier, apply or
 * undo the cable workaround, and propagate the negotiated duplex into
 * the TX/RX configuration registers. Caller holds np->lock.
 */
static void check_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;
	u16 bmsr;

	/* ignore_phy: force the duplex/config propagation only. */
	if (np->ignore_phy)
		goto propagate_state;

	/*
	 * BMSR is read twice: the link-status bit is latched, so the
	 * second read reflects the current state.
	 */
	mdio_read(dev, MII_BMSR);
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
					dev->name);
			netif_carrier_off(dev);
			undo_cable_magic(dev);
		}
		return;
	}
	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);
	}

	/*
	 * Determine duplex: forced full duplex wins; otherwise use the
	 * autonegotiation result, or BMCR if autoneg did not complete.
	 */
	duplex = np->full_duplex;
	if (!duplex) {
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
				duplex = 1;
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
			duplex = 1;
	}

propagate_state:
	/* Push a duplex change into the MAC's TX/RX config registers. */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
			printk(KERN_INFO
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
		if (duplex) {
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
		} else {
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		}
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
	}
}
1707
/*
 * Program the chip for operation: ring pointers, TX/RX DMA
 * configuration, power-management clock, RX filter, interrupts, and
 * finally start both DMA engines. Caller holds np->lock.
 */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* Reading IntrStatus clears any stale interrupt bits. */
	readl(ioaddr + IntrStatus);

	/* RX ring first; the TX ring follows it in the same allocation. */
	writel(np->ring_dma, ioaddr + RxRingPtr);
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/*
	 * TX: auto-pad short frames, retry on collision, 256-byte DMA
	 * bursts, plus the fill/drain thresholds.
	 */
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
		TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* RX: 256-byte DMA bursts and the drain threshold. */
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* Accept oversized frames when a jumbo-ish MTU is configured. */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	/*
	 * Disable PME while the interface is up, remembering the
	 * original ClkRun value for restoration later.
	 */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));
	}

	check_link(dev);
	__set_rx_mode(dev);

	/* Unmask and enable interrupts. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	natsemi_irq_enable(dev);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* reset the MIB counters */
}
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
/*
 * Periodic housekeeping timer. On the internal (TP) port it checks
 * for the DSPCFG register silently reverting (a symptom of a
 * spontaneous PHY reset) and re-initializes the chip if so; otherwise
 * it just rechecks the link. It also retries RX refill after an OOM.
 */
static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = NATSEMI_TIMER_FREQ;
	const int irq = np->pci_dev->irq;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
			dev->name);
	}

	if (dev->if_port == PORT_TP) {
		u16 dspcfg;

		spin_lock_irq(&np->lock);
		/* Read DSPCFG via register page 1. */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				/*
				 * DSPCFG changed behind our back: assume
				 * the PHY reset itself and reinitialize
				 * with the IRQ masked off.
				 */
				spin_unlock_irq(&np->lock);
				if (netif_msg_drv(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				dump_ring(dev);
				reinit_ring(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(irq);
			} else {
				/* TX in flight; retry again soon. */
				next_tick = HZ;
				spin_unlock_irq(&np->lock);
			}
		} else {
			/* DSPCFG intact; just recheck the link. */
			check_link(dev);
			spin_unlock_irq(&np->lock);
		}
	} else {
		spin_lock_irq(&np->lock);
		check_link(dev);
		spin_unlock_irq(&np->lock);
	}
	if (np->oom) {
		/* Retry the RX buffer refill that failed earlier. */
		disable_irq(irq);
		np->oom = 0;
		refill_rx(dev);
		enable_irq(irq);
		if (!np->oom) {
			writel(RxOn, ioaddr + ChipCmd);
		} else {
			next_tick = 1;	/* still OOM: poll aggressively */
		}
	}

	if (next_tick > 1)
		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
	else
		mod_timer(&np->timer, jiffies + next_tick);
}
1857
1858 static void dump_ring(struct net_device *dev)
1859 {
1860 struct netdev_private *np = netdev_priv(dev);
1861
1862 if (netif_msg_pktdata(np)) {
1863 int i;
1864 printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
1865 for (i = 0; i < TX_RING_SIZE; i++) {
1866 printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1867 i, np->tx_ring[i].next_desc,
1868 np->tx_ring[i].cmd_status,
1869 np->tx_ring[i].addr);
1870 }
1871 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1872 for (i = 0; i < RX_RING_SIZE; i++) {
1873 printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1874 i, np->rx_ring[i].next_desc,
1875 np->rx_ring[i].cmd_status,
1876 np->rx_ring[i].addr);
1877 }
1878 }
1879 }
1880
/*
 * ndo_tx_timeout: the stack declared a transmit hang. Dump the rings,
 * reset the chip, rebuild the rings and restart — all with the IRQ
 * masked and np->lock held. Skipped when the driver is in the
 * hands_off state.
 */
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	const int irq = np->pci_dev->irq;

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
			printk(KERN_WARNING
				"%s: Transmit timed out, status %#08x,"
				" resetting...\n",
				dev->name, readl(ioaddr + IntrStatus));
		dump_ring(dev);

		natsemi_reset(dev);
		reinit_ring(dev);
		init_registers(dev);
	} else {
		printk(KERN_WARNING
			"%s: tx_timeout while in hands_off state?\n",
			dev->name);
	}
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_trans_update(dev);	/* prevent an immediate re-timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
1912
1913 static int alloc_ring(struct net_device *dev)
1914 {
1915 struct netdev_private *np = netdev_priv(dev);
1916 np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
1917 sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
1918 &np->ring_dma, GFP_KERNEL);
1919 if (!np->rx_ring)
1920 return -ENOMEM;
1921 np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1922 return 0;
1923 }
1924
/*
 * Allocate and DMA-map skbs for every empty RX ring slot, then hand
 * the descriptors to the chip by writing the buffer size into
 * cmd_status. Sets np->oom if the ring could not be refilled at all.
 */
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			/* Extra padding for the 2-byte IP-alignment offset. */
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = netdev_alloc_skb(dev, buflen);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break; /* OOM: better luck next time */
			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
							   skb->data, buflen,
							   DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break; /* treat as OOM, retry later */
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		/* Writing the size gives the descriptor to the chip. */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
		np->oom = 1;	/* netdev_timer will retry the refill */
	}
}
1957
1958 static void set_bufsize(struct net_device *dev)
1959 {
1960 struct netdev_private *np = netdev_priv(dev);
1961 if (dev->mtu <= ETH_DATA_LEN)
1962 np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1963 else
1964 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1965 }
1966
1967
/*
 * Initialize both descriptor rings for a fresh start: link each
 * descriptor to the next (circularly), clear the TX ring, mark the
 * RX ring chip-owned, and fill it with buffers.
 */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* TX ring: empty, circularly linked. */
	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* TX descriptors follow the RX ring in the DMA block. */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}

	/* RX ring: cur_rx - dirty_rx == RX_RING_SIZE means "all empty". */
	np->dirty_rx = 0;
	np->cur_rx = RX_RING_SIZE;
	np->oom = 0;
	set_bufsize(dev);

	np->rx_head_desc = &np->rx_ring[0];

	/*
	 * Mark every RX descriptor chip-owned before refill_rx() hands
	 * real buffers to the hardware.
	 */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
	}
	refill_rx(dev);
	dump_ring(dev);
}
2005
2006 static void drain_tx(struct net_device *dev)
2007 {
2008 struct netdev_private *np = netdev_priv(dev);
2009 int i;
2010
2011 for (i = 0; i < TX_RING_SIZE; i++) {
2012 if (np->tx_skbuff[i]) {
2013 dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
2014 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
2015 dev_kfree_skb(np->tx_skbuff[i]);
2016 dev->stats.tx_dropped++;
2017 }
2018 np->tx_skbuff[i] = NULL;
2019 }
2020 }
2021
2022 static void drain_rx(struct net_device *dev)
2023 {
2024 struct netdev_private *np = netdev_priv(dev);
2025 unsigned int buflen = np->rx_buf_sz;
2026 int i;
2027
2028
2029 for (i = 0; i < RX_RING_SIZE; i++) {
2030 np->rx_ring[i].cmd_status = 0;
2031 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
2032 if (np->rx_skbuff[i]) {
2033 dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
2034 buflen + NATSEMI_PADDING,
2035 DMA_FROM_DEVICE);
2036 dev_kfree_skb(np->rx_skbuff[i]);
2037 }
2038 np->rx_skbuff[i] = NULL;
2039 }
2040 }
2041
/* Release all buffers attached to either descriptor ring. */
static void drain_ring(struct net_device *dev)
{
	drain_rx(dev);
	drain_tx(dev);
}
2047
2048 static void free_ring(struct net_device *dev)
2049 {
2050 struct netdev_private *np = netdev_priv(dev);
2051 dma_free_coherent(&np->pci_dev->dev,
2052 sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
2053 np->rx_ring, np->ring_dma);
2054 }
2055
2056 static void reinit_rx(struct net_device *dev)
2057 {
2058 struct netdev_private *np = netdev_priv(dev);
2059 int i;
2060
2061
2062 np->dirty_rx = 0;
2063 np->cur_rx = RX_RING_SIZE;
2064 np->rx_head_desc = &np->rx_ring[0];
2065
2066 for (i = 0; i < RX_RING_SIZE; i++)
2067 np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2068
2069 refill_rx(dev);
2070 }
2071
2072 static void reinit_ring(struct net_device *dev)
2073 {
2074 struct netdev_private *np = netdev_priv(dev);
2075 int i;
2076
2077
2078 drain_tx(dev);
2079 np->dirty_tx = np->cur_tx = 0;
2080 for (i=0;i<TX_RING_SIZE;i++)
2081 np->tx_ring[i].cmd_status = 0;
2082
2083 reinit_rx(dev);
2084 }
2085
/*
 * ndo_start_xmit: map the skb for DMA, fill the next TX descriptor,
 * and kick the transmitter. Stops the queue when the ring is nearly
 * full; drops the packet if the driver is in the hands_off state.
 */
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	unsigned entry;
	unsigned long flags;

	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
		/* Cannot DMA this skb: drop it rather than stall. */
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irqsave(&np->lock, flags);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/*
		 * The descriptor must be fully visible to the device
		 * before cur_tx advances / the transmitter is kicked.
		 */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			/* Reap finished frames; stop only if still full. */
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Tell the chip there is work on the TX ring. */
		writel(TxOn, ioaddr + ChipCmd);
	} else {
		/* hands_off (e.g. suspend): silently drop. */
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
2138
/*
 * Reap completed TX descriptors: account statistics, unmap and free
 * the skbs, and wake the queue once enough slots have drained.
 * Caller holds np->lock.
 */
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* Stop at the first descriptor the chip still owns. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else {
			/* Classify the failure from the status bits. */
			int tx_status =
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				dev->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				dev->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				dev->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				dev->stats.tx_window_errors++;
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
		/* Free the original skb. */
		dev_consume_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	/* Wake the queue once there is comfortable headroom again. */
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
}
2180
2181
2182
/*
 * Interrupt handler: reads IntrStatus and, if any bits are set,
 * disables further interrupts and schedules NAPI. The real work
 * happens in natsemi_poll().
 */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/*
	 * hands_off, or interrupts disabled (shared-IRQ neighbor fired):
	 * not ours.
	 */
	if (np->hands_off || !readl(ioaddr + IntrEnable))
		return IRQ_NONE;

	/* Reading IntrStatus also acknowledges the interrupt. */
	np->intr_status = readl(ioaddr + IntrStatus);

	if (!np->intr_status)
		return IRQ_NONE;

	if (netif_msg_intr(np))
		printk(KERN_DEBUG
		       "%s: Interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (napi_schedule_prep(&np->napi)) {
		/* Mask interrupts until the poll routine re-enables them. */
		natsemi_irq_disable(dev);
		__napi_schedule(&np->napi);
	} else
		printk(KERN_WARNING
	       	       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
		       dev->name, np->intr_status,
		       readl(ioaddr + IntrMask));

	return IRQ_HANDLED;
}
2220
2221
2222
2223
/*
 * NAPI poll routine: process RX, TX-completion, and abnormal events
 * recorded in np->intr_status, re-reading the status register until
 * it is clear or the budget is exhausted. Interrupts are re-enabled
 * only once everything has drained.
 */
static int natsemi_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	void __iomem * ioaddr = ns_ioaddr(dev);
	int work_done = 0;

	do {
		if (netif_msg_intr(np))
			printk(KERN_DEBUG
			       "%s: Poll, status %#08x, mask %#08x.\n",
			       dev->name, np->intr_status,
			       readl(ioaddr + IntrMask));

		/* RX-related events. */
		if (np->intr_status &
		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
		     IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, budget);
		}

		/* TX-completion events; netdev_tx_done needs the lock. */
		if (np->intr_status &
		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal events (link change, stats, errors, ...). */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		/* Budget spent: stay scheduled, do not re-enable IRQs. */
		if (work_done >= budget)
			return work_done;

		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	napi_complete_done(napi, work_done);

	/* Re-enable interrupts, unless the driver went hands_off. */
	spin_lock(&np->lock);
	if (!np->hands_off)
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);

	return work_done;
}
2274
2275
2276
/*
 * Process received packets from the RX ring, up to work_to_do frames.
 * Small packets (below rx_copybreak) are copied into a fresh skb so
 * the DMA buffer can be reused; larger ones are unmapped and passed
 * up directly. Handles error frames and the multi-descriptor
 * (oversized) case, which forces an RX engine reset.
 */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* desc_status < 0 means DescOwn is clear: the frame is ours. */
	while (desc_status < 0) {
		int pkt_len;
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
			       "  netdev_rx() entry %d status was %#08x.\n",
			       entry, desc_status);
		if (--boguscnt < 0)
			break;	/* processed more slots than exist? bail */

		if (*work_done >= work_to_do)
			break;	/* NAPI budget exhausted */

		(*work_done)++;

		/* Strip the trailing 4-byte CRC from the reported length. */
		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				unsigned long flags;

				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				dev->stats.rx_length_errors++;

				/*
				 * A frame spilled across descriptors:
				 * reset and rebuild the RX side to get
				 * back to a sane state, then stop
				 * processing this batch.
				 */
				spin_lock_irqsave(&np->lock, flags);
				reset_rx(dev);
				reinit_rx(dev);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				check_link(dev);
				spin_unlock_irqrestore(&np->lock, flags);

				break;

			} else {
				/* Ordinary error frame: classify it. */
				dev->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					dev->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					dev->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					dev->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					dev->stats.rx_crc_errors++;
			}
		} else if (pkt_len > np->rx_buf_sz) {
			/*
			 * Frame larger than our buffer but no error bit:
			 * silently skip it (cannot deliver it intact).
			 */
		} else {
			struct sk_buff *skb;
			/*
			 * Copybreak path: copy small frames into a fresh
			 * skb and leave the mapped buffer in the ring.
			 */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
				/* Align the IP header on a 16-byte boundary. */
				skb_reserve(skb, RX_OFFSET);
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->rx_dma[entry],
							buflen,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
					np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				/* Give the buffer back to the device. */
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->rx_dma[entry],
							   buflen,
							   DMA_FROM_DEVICE);
			} else {
				/* Hand the ring skb itself up the stack. */
				dma_unmap_single(&np->pci_dev->dev,
						 np->rx_dma[entry],
						 buflen + NATSEMI_PADDING,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}
	refill_rx(dev);

	/* On OOM let the timer retry the refill; otherwise restart RX. */
	if (np->oom)
		mod_timer(&np->timer, jiffies + 1);
	else
		writel(RxOn, ioaddr + ChipCmd);
}
2393
/*
 * Handle the "abnormal" interrupt causes: link changes, statistics
 * counter overflow, TX underrun (by raising the drain threshold),
 * wake-on-LAN packets, RX status FIFO overruns, and PCI errors.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
		    netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x partner %#04x.\n", dev->name,
				np->advertising, lpa);
		}

		/* Acknowledge the PHY's interrupt status, then recheck. */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		/* A hardware counter is near overflow: fold it in now. */
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		/*
		 * Raise the TX drain threshold step by step until the
		 * underruns stop or the limit is reached.
		 */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		} else {
			if (netif_msg_tx_err(np))
				printk(KERN_NOTICE
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		}
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	/* A PCI error hits both directions; count it on both. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	spin_unlock(&np->lock);
}
2456
2457 static void __get_stats(struct net_device *dev)
2458 {
2459 void __iomem * ioaddr = ns_ioaddr(dev);
2460
2461
2462 dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2463 dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2464 }
2465
2466 static struct net_device_stats *get_stats(struct net_device *dev)
2467 {
2468 struct netdev_private *np = netdev_priv(dev);
2469
2470
2471 spin_lock_irq(&np->lock);
2472 if (netif_running(dev) && !np->hands_off)
2473 __get_stats(dev);
2474 spin_unlock_irq(&np->lock);
2475
2476 return &dev->stats;
2477 }
2478
2479 #ifdef CONFIG_NET_POLL_CONTROLLER
2480 static void natsemi_poll_controller(struct net_device *dev)
2481 {
2482 struct netdev_private *np = netdev_priv(dev);
2483 const int irq = np->pci_dev->irq;
2484
2485 disable_irq(irq);
2486 intr_handler(irq, dev);
2487 enable_irq(irq);
2488 }
2489 #endif
2490
2491 #define HASH_TABLE 0x200
/*
 * Program the RX filter according to dev->flags and the multicast
 * list: promiscuous, all-multicast, or a 512-bit multicast hash
 * table built from the CRC of each address. Caller holds np->lock.
 */
static void __set_rx_mode(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64];	/* 512-bit multicast hash table */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Accept everything, including other stations' frames. */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many groups for the hash table: take all multicast. */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;
		int i;

		/* Build the hash table from bits 23..31 of each CRC. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
			mc_filter[b/8] |= (1 << (b & 0x07));
		}
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		/* Upload the table 16 bits at a time. */
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
			       ioaddr + RxFilterData);
		}
	}
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;	/* remembered for netdev_open() */
}
2526
/*
 * ndo_change_mtu: record the new MTU and, if the interface is up,
 * stop the chip, resize and rebuild the RX ring, and restart —
 * all with the IRQ masked and np->lock held. Always returns 0.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	/* An idle interface only needs the new value recorded. */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);
		const int irq = np->pci_dev->irq;

		disable_irq(irq);
		spin_lock(&np->lock);
		/* Quiesce the DMA engines. */
		natsemi_stop_rxtx(dev);
		/* Drop the old, possibly wrongly-sized RX buffers. */
		drain_rx(dev);
		/* Recompute the buffer size and rebuild the RX ring. */
		set_bufsize(dev);
		reinit_rx(dev);
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* Restart both engines. */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(irq);
	}
	return 0;
}
2554
2555 static void set_rx_mode(struct net_device *dev)
2556 {
2557 struct netdev_private *np = netdev_priv(dev);
2558 spin_lock_irq(&np->lock);
2559 if (!np->hands_off)
2560 __set_rx_mode(dev);
2561 spin_unlock_irq(&np->lock);
2562 }
2563
2564 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2565 {
2566 struct netdev_private *np = netdev_priv(dev);
2567 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2568 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2569 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
2570 }
2571
2572 static int get_regs_len(struct net_device *dev)
2573 {
2574 return NATSEMI_REGS_SIZE;
2575 }
2576
2577 static int get_eeprom_len(struct net_device *dev)
2578 {
2579 struct netdev_private *np = netdev_priv(dev);
2580 return np->eeprom_size;
2581 }
2582
2583 static int get_link_ksettings(struct net_device *dev,
2584 struct ethtool_link_ksettings *ecmd)
2585 {
2586 struct netdev_private *np = netdev_priv(dev);
2587 spin_lock_irq(&np->lock);
2588 netdev_get_ecmd(dev, ecmd);
2589 spin_unlock_irq(&np->lock);
2590 return 0;
2591 }
2592
2593 static int set_link_ksettings(struct net_device *dev,
2594 const struct ethtool_link_ksettings *ecmd)
2595 {
2596 struct netdev_private *np = netdev_priv(dev);
2597 int res;
2598 spin_lock_irq(&np->lock);
2599 res = netdev_set_ecmd(dev, ecmd);
2600 spin_unlock_irq(&np->lock);
2601 return res;
2602 }
2603
/* ethtool get_wol: report supported/enabled wake-on-lan options and the
 * SecureOn password, read under the device lock. */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
}
2612
/* ethtool set_wol: program new wake-on-lan options and SecureOn password
 * under the device lock; only the sopass write can report an error. */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;

	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
	return res;
}
2623
/* ethtool get_regs: dump the chip registers into buf (NATSEMI_REGS_SIZE
 * bytes, see get_regs_len) while holding the device lock. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct netdev_private *np = netdev_priv(dev);

	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
}
2632
2633 static u32 get_msglevel(struct net_device *dev)
2634 {
2635 struct netdev_private *np = netdev_priv(dev);
2636 return np->msg_enable;
2637 }
2638
2639 static void set_msglevel(struct net_device *dev, u32 val)
2640 {
2641 struct netdev_private *np = netdev_priv(dev);
2642 np->msg_enable = val;
2643 }
2644
2645 static int nway_reset(struct net_device *dev)
2646 {
2647 int tmp;
2648 int r = -EINVAL;
2649
2650 tmp = mdio_read(dev, MII_BMCR);
2651 if (tmp & BMCR_ANENABLE) {
2652 tmp |= (BMCR_ANRESTART);
2653 mdio_write(dev, MII_BMCR, tmp);
2654 r = 0;
2655 }
2656 return r;
2657 }
2658
2659 static u32 get_link(struct net_device *dev)
2660 {
2661
2662 mdio_read(dev, MII_BMSR);
2663 return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2664 }
2665
/* ethtool get_eeprom: read the whole EEPROM into a bounce buffer under the
 * device lock, then copy out only the requested offset/len window. */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	u8 *eebuf;
	int res;

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
	if (!eebuf)
		return -ENOMEM;

	/* magic identifies the device the dump came from */
	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	if (!res)
		memcpy(data, eebuf+eeprom->offset, eeprom->len);
	kfree(eebuf);
	return res;
}
2685
/* ethtool operations supported by this driver.  The accessors that touch
 * hardware state serialize against the interrupt path via np->lock. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_wol = get_wol,
	.set_wol = set_wol,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
2701
2702 static int netdev_set_wol(struct net_device *dev, u32 newval)
2703 {
2704 struct netdev_private *np = netdev_priv(dev);
2705 void __iomem * ioaddr = ns_ioaddr(dev);
2706 u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2707
2708
2709 if (newval & WAKE_PHY)
2710 data |= WakePhy;
2711 if (newval & WAKE_UCAST)
2712 data |= WakeUnicast;
2713 if (newval & WAKE_MCAST)
2714 data |= WakeMulticast;
2715 if (newval & WAKE_BCAST)
2716 data |= WakeBroadcast;
2717 if (newval & WAKE_ARP)
2718 data |= WakeArp;
2719 if (newval & WAKE_MAGIC)
2720 data |= WakeMagic;
2721 if (np->srr >= SRR_DP83815_D) {
2722 if (newval & WAKE_MAGICSECURE) {
2723 data |= WakeMagicSecure;
2724 }
2725 }
2726
2727 writel(data, ioaddr + WOLCmd);
2728
2729 return 0;
2730 }
2731
2732 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2733 {
2734 struct netdev_private *np = netdev_priv(dev);
2735 void __iomem * ioaddr = ns_ioaddr(dev);
2736 u32 regval = readl(ioaddr + WOLCmd);
2737
2738 *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2739 | WAKE_ARP | WAKE_MAGIC);
2740
2741 if (np->srr >= SRR_DP83815_D) {
2742
2743 *supported |= WAKE_MAGICSECURE;
2744 }
2745 *cur = 0;
2746
2747
2748 if (regval & WakePhy)
2749 *cur |= WAKE_PHY;
2750 if (regval & WakeUnicast)
2751 *cur |= WAKE_UCAST;
2752 if (regval & WakeMulticast)
2753 *cur |= WAKE_MCAST;
2754 if (regval & WakeBroadcast)
2755 *cur |= WAKE_BCAST;
2756 if (regval & WakeArp)
2757 *cur |= WAKE_ARP;
2758 if (regval & WakeMagic)
2759 *cur |= WAKE_MAGIC;
2760 if (regval & WakeMagicSecure) {
2761
2762 *cur |= WAKE_MAGICSECURE;
2763 }
2764
2765 return 0;
2766 }
2767
/* Write the 6-byte SecureOn password into the rx filter register file.
 * Caller holds np->lock.  Silently ignored on chips older than rev D,
 * which lack the feature. */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		return 0;
	}

	/* enable writing to the rx filter address space and disable the
	 * filter while the password registers are updated */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three 16-bit password words at offsets 0xa/0xc/0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the rx filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);

	return 0;
}
2799
/* Read the 6-byte SecureOn password back from the rx filter register file.
 * Caller holds np->lock.  Chips older than rev D have no password; report
 * all zeroes for them. */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;
	u32 addr;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;
		return 0;
	}

	/* read the three password words at offsets 0xa/0xc/0xe, then
	 * restore the original RFCR address */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);

	return 0;
}
2828
/* Fill in an ethtool_link_ksettings structure from the driver's cached
 * link state.  Caller holds np->lock. */
static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;
	u32 tmp;

	ecmd->base.port = dev->if_port;
	ecmd->base.speed = np->speed;
	ecmd->base.duplex = np->duplex;
	ecmd->base.autoneg = np->autoneg;
	advertising = 0;

	/* translate the cached MII advertisement bits into ethtool flags */
	if (np->advertising & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	/* NOTE(review): the external phy address is reported even when the
	 * internal transceiver is active; presumably this keeps
	 * "ethtool -s ethX port mii" from unexpectedly changing the phy
	 * address, since ethtool can only set what it previously read back
	 * — confirm against the port-switching helpers. */
	ecmd->base.phy_address = np->phy_addr_external;

	/* map the active port onto the matching ADVERTISED_* medium flag */
	switch (ecmd->base.port) {
	default:
	case PORT_TP:
		advertising |= ADVERTISED_TP;
		break;
	case PORT_MII:
		advertising |= ADVERTISED_MII;
		break;
	case PORT_FIBRE:
		advertising |= ADVERTISED_FIBRE;
		break;
	}

	/* if autonegotiation is on, report the negotiated speed/duplex by
	 * intersecting our advertisement with the link partner ability */
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->base.speed = SPEED_100;
		else
			ecmd->base.speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->base.duplex = DUPLEX_FULL;
		else
			ecmd->base.duplex = DUPLEX_HALF;
	}

	/* convert the legacy u32 bitmaps into link-mode masks */
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
						advertising);

	return 0;
}
2911
/* Validate and apply new link settings from ethtool.  Caller holds
 * np->lock.  Returns 0 on success or -EINVAL for inconsistent requests. */
static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						ecmd->link_modes.advertising);

	if (ecmd->base.port != PORT_TP &&
	    ecmd->base.port != PORT_MII &&
	    ecmd->base.port != PORT_FIBRE)
		return -EINVAL;
	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
		/* autoneg needs at least one mode to advertise */
		if ((advertising & (ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full)) == 0) {
			return -EINVAL;
		}
	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
		/* forced mode: only 10/100 half/full are valid */
		u32 speed = ecmd->base.speed;
		if (speed != SPEED_10 && speed != SPEED_100)
			return -EINVAL;
		if (ecmd->base.duplex != DUPLEX_HALF &&
		    ecmd->base.duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
			       ecmd->base.port == PORT_TP))
		return -EINVAL;

	/* PORT_TP selects the internal transceiver, PORT_MII and PORT_FIBRE
	 * both select the external mii bus (see switch below) */
	dev->if_port = ecmd->base.port;
	np->autoneg = ecmd->base.autoneg;
	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
	} else {
		np->speed = ecmd->base.speed;
		np->duplex = ecmd->base.duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;
	}

	/* get the right phy enabled */
	if (ecmd->base.port == PORT_TP)
		switch_port_internal(dev);
	else
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
	check_link(dev);
	return 0;
}
3001
/* Dump the chip's register space into buf for ethtool.  Caller holds
 * np->lock. */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
{
	int i;
	int j;
	u32 rfcr;
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);
	}

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed register space, restoring RFCR afterwards */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	}
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed one */
	if (rbuf[4] & rbuf[5]) {
		printk(KERN_WARNING
			"%s: shoot, we dropped an interrupt (%#08x)\n",
			dev->name, rbuf[4] & rbuf[5]);
	}

	return 0;
}
3044
/* Reverse the bit order of a 16-bit value: bit 0 <-> bit 15, bit 1 <->
 * bit 14, and so on. */
#define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
			| (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
			| (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
			| (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
			| (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
			| (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )

/* Read the whole EEPROM into buf as 16-bit words.  Caller holds np->lock. */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
{
	int i;
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
		/* presumably eeprom_read returns the word in the chip's bit
		 * order while the dump should match the EEPROM's native
		 * (bit-reversed) layout — TODO confirm against eeprom_read */
		ebuf[i] = SWAP_BITS(ebuf[i]);
	}
	return 0;
}
3071
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG). */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phy_addr_external;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* The phy_id alone does not uniquely identify the target:
		 * the read is serviced on whichever port is currently
		 * active.  On the internal port (TP) only the external phy
		 * address is answered; everything else reads as zero. */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			else
				data->val_out = 0;
		} else {
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
							data->reg_num & 0x1f);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				/* keep the cached advertisement in sync */
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
							data->val_in);
			}
		} else {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
			}
			move_int_phy(dev, data->phy_id & 0x1f);
			miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
						data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
3123
/* Put the (already stopped) chip into wake-on-lan mode.  With enable_intr
 * set, WOL and link-change interrupts are re-armed as well. */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
			dev->name);

	/* For WOL we must restart the rx process in silent mode.
	 * Thus we prevent all events by setting the RxRingPtr to zero;
	 * the rx process is restarted below. */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear it */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	if (enable_intr) {
		/* enable the WOL interrupt.
		 * Could be used to send a netlink message.
		 */
		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
		natsemi_irq_enable(dev);
	}
}
3156
/* ndo_stop: shut the interface down, stopping all async code paths before
 * touching the hardware, then leave the chip in WOL mode if requested. */
static int netdev_close(struct net_device *dev)
{
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pci_dev->irq;

	if (netif_msg_ifdown(np))
		printk(KERN_DEBUG
			"%s: Shutting down ethercard, status was %#04x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		printk(KERN_DEBUG
			"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
			dev->name, np->cur_tx, np->dirty_tx,
			np->cur_rx, np->dirty_rx);

	napi_disable(&np->napi);

	/* Disable interrupts with hands_off set so a concurrently running
	 * interrupt handler won't re-enable them, then release the irq.
	 * After free_irq no interrupt handler can run any more. */
	del_timer_sync(&np->timer);
	disable_irq(irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	np->hands_off = 1;
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	free_irq(irq, dev);

	/* Interrupt disabled, interrupt handler released, queue stopped,
	 * timer deleted, rtnl_lock held: all async codepaths that touch
	 * the hardware are stopped. */
	spin_lock_irq(&np->lock);
	np->hands_off = 0;
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze Stats */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	__get_stats(dev);
	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	dump_ring(dev);
	drain_ring(dev);
	free_ring(dev);

	{
		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		if (wol) {
			/* restart the NIC in WOL mode.
			 * The nic must be stopped for this.
			 */
			enable_wol_mode(dev, 0);
		} else {
			/* Restore PME enable bit unmolested */
			writel(np->SavedClkRun, ioaddr + ClkRun);
		}
	}
	return 0;
}
3232
3233
/* PCI remove: tear down in reverse order of probe — sysfs file, netdev
 * registration, the MMIO mapping, and finally the netdev itself. */
static void natsemi_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
	unregister_netdev (dev);
	iounmap(ioaddr);
	free_netdev (dev);
}
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
/* PM suspend: quiesce the chip (with hands_off blocking all other code
 * paths), harvest the stats, drain the rings, then either arm WOL mode or
 * restore the saved ClkRun/PME state. */
static int __maybe_unused natsemi_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = np->pci_dev->irq;

		del_timer_sync(&np->timer);

		disable_irq(irq);
		spin_lock_irq(&np->lock);

		natsemi_irq_disable(dev);
		np->hands_off = 1;
		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		napi_disable(&np->napi);

		/* Update the error counts. */
		__get_stats(dev);

		/* pci_power_off(pdev, -1); */
		drain_ring(dev);
		{
			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
			/* Restore PME enable bit */
			if (wol) {
				/* restart the NIC in WOL mode.
				 * The nic must be stopped for this.
				 * FIXME: use the WOL interrupt
				 */
				enable_wol_mode(dev, 0);
			} else {
				/* Restore PME enable bit unmolested */
				writel(np->SavedClkRun, ioaddr + ClkRun);
			}
		}
	}
	netif_device_detach(dev);
	rtnl_unlock();
	return 0;
}
3320
3321
/* PM resume: undo natsemi_suspend — reset and reprogram the chip, rebuild
 * the rings, and restart the link-check timer. */
static int __maybe_unused natsemi_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	/* nothing to do if the device was never detached */
	if (netif_device_present(dev))
		goto out;
	if (netif_running(dev)) {
		const int irq = np->pci_dev->irq;

		/* suspend must have set hands_off for a running device */
		BUG_ON(!np->hands_off);

		napi_enable(&np->napi);

		natsemi_reset(dev);
		init_ring(dev);
		disable_irq(irq);
		spin_lock_irq(&np->lock);
		np->hands_off = 0;
		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(irq);

		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
	}
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return 0;
}
3355
/* Power-management callbacks and the PCI driver glue. */
static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);

static struct pci_driver natsemi_driver = {
	.name = DRV_NAME,
	.id_table = natsemi_pci_tbl,
	.probe = natsemi_probe1,
	.remove = natsemi_remove1,
	.driver.pm = &natsemi_pm_ops,
};
3365
/* Module entry point: register the PCI driver. */
static int __init natsemi_init_mod (void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&natsemi_driver);
}
3375
/* Module exit point: unregister the PCI driver. */
static void __exit natsemi_exit_mod (void)
{
	pci_unregister_driver (&natsemi_driver);
}

module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);
3383