0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052 #include <linux/module.h>
0053 #include <linux/moduleparam.h>
0054 #include <linux/kernel.h>
0055 #include <linux/sched.h>
0056 #include <linux/string.h>
0057 #include <linux/timer.h>
0058 #include <linux/errno.h>
0059 #include <linux/ioport.h>
0060 #include <linux/slab.h>
0061 #include <linux/interrupt.h>
0062 #include <linux/pci.h>
0063 #include <linux/netdevice.h>
0064 #include <linux/init.h>
0065 #include <linux/mii.h>
0066 #include <linux/etherdevice.h>
0067 #include <linux/skbuff.h>
0068 #include <linux/delay.h>
0069 #include <linux/ethtool.h>
0070 #include <linux/crc32.h>
0071 #include <linux/bitops.h>
0072 #include <linux/dma-mapping.h>
0073
0074 #include <asm/processor.h> /* Processor type for cache alignment. */
0075 #include <asm/io.h>
0076 #include <asm/irq.h>
0077 #include <linux/uaccess.h> /* User space memory access functions */
0078
0079 #include "sis900.h"
0080
0081 #define SIS900_MODULE_NAME "sis900"
0082 #define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
0083
0084 static const char version[] =
0085 KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
0086
0087 static int max_interrupt_work = 40;
0088 static int multicast_filter_limit = 128;
0089
0090 static int sis900_debug = -1;
0091
0092 #define SIS900_DEF_MSG \
0093 (NETIF_MSG_DRV | \
0094 NETIF_MSG_LINK | \
0095 NETIF_MSG_RX_ERR | \
0096 NETIF_MSG_TX_ERR)
0097
0098
0099 #define TX_TIMEOUT (4*HZ)
0100
0101 enum {
0102 SIS_900 = 0,
0103 SIS_7016
0104 };
0105 static const char * card_names[] = {
0106 "SiS 900 PCI Fast Ethernet",
0107 "SiS 7016 PCI Fast Ethernet"
0108 };
0109
0110 static const struct pci_device_id sis900_pci_tbl[] = {
0111 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
0112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
0113 {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
0114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016},
0115 {0,}
0116 };
0117 MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);
0118
0119 static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);
0120
/* Table of known MII PHY transceivers, matched at probe time by phy_id0
 * and the upper 12 bits of phy_id1 (phy_id1 & 0xFFF0).  MIX entries are
 * resolved to LAN or HOME from the PHY's advertised capabilities. */
static const struct mii_chip_info {
	const char * name;	/* human-readable transceiver name */
	u16 phy_id0;		/* expected MII_PHY_ID0 value */
	u16 phy_id1;		/* expected MII_PHY_ID1 (upper 12 bits) */
	u8 phy_types;		/* media class: HOME, LAN, MIX or UNKNOWN */
#define	HOME	0x0001		/* HomePNA phone-line media */
#define LAN	0x0002		/* regular 10/100 Ethernet media */
#define MIX	0x0003		/* supports both; resolved at probe */
#define UNKNOWN	0x0		/* PHY not in this table */
} mii_chip_table[] = {
	{ "SiS 900 Internal MII PHY", 		0x001d, 0x8000, LAN },
	{ "SiS 7014 Physical Layer Solution", 	0x0016, 0xf830, LAN },
	{ "SiS 900 on Foxconn 661 7MI", 	0x0143, 0xBC70, LAN },
	{ "Altimata AC101LF PHY", 		0x0022, 0x5520, LAN },
	{ "ADM 7001 LAN PHY", 			0x002e, 0xcc60, LAN },
	{ "AMD 79C901 10BASE-T PHY",  		0x0000, 0x6B70, LAN },
	{ "AMD 79C901 HomePNA PHY",		0x0000, 0x6B90, HOME},
	{ "ICS LAN PHY",			0x0015, 0xF440, LAN },
	{ "ICS LAN PHY",			0x0143, 0xBC70, LAN },
	{ "NS 83851 PHY",			0x2000, 0x5C20, MIX },
	{ "NS 83847 PHY",			0x2000, 0x5C30, MIX },
	{ "Realtek RTL8201 PHY",		0x0000, 0x8200, LAN },
	{ "VIA 6103 PHY",			0x0101, 0x8f20, LAN },
	{NULL,},
};
0146
/* One discovered MII PHY.  PHYs are kept on a singly linked list headed
 * by sis900_private::first_mii; sis900_private::mii points at the PHY
 * currently in use. */
struct mii_phy {
	struct mii_phy * next;	/* next PHY on the probe list */
	int phy_addr;		/* MII bus address (0..31) */
	u16 phy_id0;		/* cached MII_PHY_ID0 register */
	u16 phy_id1;		/* cached MII_PHY_ID1 register */
	u16 status;		/* MII_STATUS value sampled at probe */
	u8 phy_types;		/* HOME/LAN/MIX/UNKNOWN classification */
};
0155
/* SiS 900/7016 hardware DMA descriptor.  All three fields are bus
 * addresses / register words consumed directly by the NIC, so the
 * layout must not change. */
typedef struct _BufferDesc {
	u32 link;	/* bus address of the next descriptor in the ring */
	u32 cmdsts;	/* command (TX) / status (RX) word incl. buffer size */
	u32 bufptr;	/* bus address of the data buffer */
} BufferDesc;
0161
/* Per-adapter private state, allocated as part of the net_device. */
struct sis900_private {
	struct pci_dev * pci_dev;	/* owning PCI device */

	spinlock_t lock;		/* protects TX path and register access */

	struct mii_phy * mii;		/* PHY currently selected as default */
	struct mii_phy * first_mii;	/* head of the discovered-PHY list */
	unsigned int cur_phy;		/* MII address of the default PHY */
	struct mii_if_info mii_info;	/* generic MII ioctl/ethtool glue */

	void __iomem *ioaddr;		/* mapped BAR0 register window */

	struct timer_list timer;	/* link monitoring timer (sis900_timer) */
	u8 autong_complete;		/* 1 when autonegotiation has finished */

	u32 msg_enable;			/* netif_msg_* verbosity bitmap */

	unsigned int cur_rx, dirty_rx;	/* producer/consumer indices, RX ring */
	unsigned int cur_tx, dirty_tx;	/* producer/consumer indices, TX ring */

	/* The saved address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[NUM_TX_DESC];
	struct sk_buff *rx_skbuff[NUM_RX_DESC];
	BufferDesc *tx_ring;		/* coherent DMA TX descriptor ring */
	BufferDesc *rx_ring;		/* coherent DMA RX descriptor ring */

	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */

	unsigned int tx_full;		/* TX ring is full, queue stopped */
	u8 host_bridge_rev;		/* SiS 630 host bridge revision (EQ tuning) */
	u8 chipset_rev;			/* PCI revision of the NIC itself */

	int eeprom_size;		/* EEPROM size in 16-bit words */
};
0197
0198 MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
0199 MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
0200 MODULE_LICENSE("GPL");
0201
0202 module_param(multicast_filter_limit, int, 0444);
0203 module_param(max_interrupt_work, int, 0444);
0204 module_param(sis900_debug, int, 0444);
0205 MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
0206 MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
0207 MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
0208
0209 #define sw32(reg, val) iowrite32(val, ioaddr + (reg))
0210 #define sw8(reg, val) iowrite8(val, ioaddr + (reg))
0211 #define sr32(reg) ioread32(ioaddr + (reg))
0212 #define sr16(reg) ioread16(ioaddr + (reg))
0213
0214 #ifdef CONFIG_NET_POLL_CONTROLLER
0215 static void sis900_poll(struct net_device *dev);
0216 #endif
0217 static int sis900_open(struct net_device *net_dev);
0218 static int sis900_mii_probe (struct net_device * net_dev);
0219 static void sis900_init_rxfilter (struct net_device * net_dev);
0220 static u16 read_eeprom(void __iomem *ioaddr, int location);
0221 static int mdio_read(struct net_device *net_dev, int phy_id, int location);
0222 static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
0223 static void sis900_timer(struct timer_list *t);
0224 static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
0225 static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
0226 static void sis900_init_tx_ring(struct net_device *net_dev);
0227 static void sis900_init_rx_ring(struct net_device *net_dev);
0228 static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
0229 struct net_device *net_dev);
0230 static int sis900_rx(struct net_device *net_dev);
0231 static void sis900_finish_xmit (struct net_device *net_dev);
0232 static irqreturn_t sis900_interrupt(int irq, void *dev_instance);
0233 static int sis900_close(struct net_device *net_dev);
0234 static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
0235 static u16 sis900_mcast_bitnr(u8 *addr, u8 revision);
0236 static void set_rx_mode(struct net_device *net_dev);
0237 static void sis900_reset(struct net_device *net_dev);
0238 static void sis630_set_eq(struct net_device *net_dev, u8 revision);
0239 static int sis900_set_config(struct net_device *dev, struct ifmap *map);
0240 static u16 sis900_default_phy(struct net_device * net_dev);
0241 static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
0242 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
0243 static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
0244 static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
0245 static const struct ethtool_ops sis900_ethtool_ops;
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
/**
 * sis900_get_mac_addr - read MAC address for a stand-alone SiS 900 from EEPROM
 * @pci_dev: the NIC's PCI device (used for log messages only)
 * @net_dev: the net device whose hardware address is set
 *
 * Checks the EEPROM signature word first: all-zeros or all-ones means the
 * EEPROM is missing or unreadable.  Otherwise reads the three 16-bit MAC
 * words starting at EEPROMMACAddr.  Returns 1 on success, 0 on failure.
 */
static int sis900_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 addr[ETH_ALEN / 2];
	u16 signature;
	int i;

	/* check to see if we have sane EEPROM */
	signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
	if (signature == 0xffff || signature == 0x0000) {
		printk (KERN_WARNING "%s: Error EEPROM read %x\n",
			pci_name(pci_dev), signature);
		return 0;
	}

	/* get MAC address from EEPROM, one 16-bit word at a time */
	for (i = 0; i < 3; i++)
		addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
	eth_hw_addr_set(net_dev, (u8 *)addr);

	return 1;
}
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291 static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
0292 struct net_device *net_dev)
0293 {
0294 struct pci_dev *isa_bridge = NULL;
0295 u8 addr[ETH_ALEN];
0296 u8 reg;
0297 int i;
0298
0299 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
0300 if (!isa_bridge)
0301 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
0302 if (!isa_bridge) {
0303 printk(KERN_WARNING "%s: Can not find ISA bridge\n",
0304 pci_name(pci_dev));
0305 return 0;
0306 }
0307 pci_read_config_byte(isa_bridge, 0x48, ®);
0308 pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);
0309
0310 for (i = 0; i < 6; i++) {
0311 outb(0x09 + i, 0x70);
0312 addr[i] = inb(0x71);
0313 }
0314 eth_hw_addr_set(net_dev, addr);
0315
0316 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
0317 pci_dev_put(isa_bridge);
0318
0319 return 1;
0320 }
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
/**
 * sis635_get_mac_addr - obtain the MAC address for a SiS 635 model
 * @pci_dev: the NIC's PCI device (unused here, kept for a uniform signature)
 * @net_dev: the net device whose hardware address is set
 *
 * Triggers a RELOAD of the receive filter registers from the chipset's
 * APC, then reads the MAC back out of the filter data register with the
 * filter temporarily disabled.  The original rfcr value (with RFEN
 * re-set) is restored afterwards.  Always returns 1.
 */
static int sis635_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 addr[ETH_ALEN / 2];
	u32 rfcrSave;
	u32 i;

	rfcrSave = sr32(rfcr);

	/* strobe RELOAD so the hardware refreshes the filter registers */
	sw32(cr, rfcrSave | RELOAD);
	sw32(cr, 0);

	/* disable packet filtering before setting filter addresses */
	sw32(rfcr, rfcrSave & ~RFEN);

	/* select each 16-bit filter-address slot and read it back */
	for (i = 0 ; i < 3 ; i++) {
		sw32(rfcr, (i << RFADDR_shift));
		addr[i] = sr16(rfdr);
	}
	eth_hw_addr_set(net_dev, (u8 *)addr);

	/* re-enable packet filtering */
	sw32(rfcr, rfcrSave | RFEN);

	return 1;
}
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
/**
 * sis96x_get_mac_addr - obtain the MAC address for a SiS 962/963 model
 * @pci_dev: the NIC's PCI device (unused here, kept for a uniform signature)
 * @net_dev: the net device whose hardware address is set
 *
 * The SiS 96x EEPROM is shared with other functions, so access must be
 * arbitrated: request it with EEREQ, busy-poll (up to ~2 ms) for EEGNT,
 * read the three MAC words, then release with EEDONE.  Returns 1 on
 * success, 0 if the grant never arrives.
 */
static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 addr[ETH_ALEN / 2];
	int wait, rc = 0;

	sw32(mear, EEREQ);
	for (wait = 0; wait < 2000; wait++) {
		if (sr32(mear) & EEGNT) {
			int i;

			/* get MAC address from EEPROM */
			for (i = 0; i < 3; i++)
				addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
			eth_hw_addr_set(net_dev, (u8 *)addr);

			rc = 1;
			break;
		}
		udelay(1);
	}
	sw32(mear, EEDONE);
	return rc;
}
0405
/* net_device_ops vtable wiring the driver's handlers into the netdev core. */
static const struct net_device_ops sis900_netdev_ops = {
	.ndo_open		= sis900_open,
	.ndo_stop		= sis900_close,
	.ndo_start_xmit		= sis900_start_xmit,
	.ndo_set_config		= sis900_set_config,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_eth_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis900_poll,
#endif
};
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
/**
 * sis900_probe - PCI probe entry point for SiS 900/7016 devices
 * @pci_dev: the newly found PCI device
 * @pci_id: matching entry from sis900_pci_tbl (driver_data selects the name)
 *
 * Enables the device, sets up 32-bit DMA, allocates the net_device plus
 * coherent TX/RX descriptor rings, determines the MAC address by chipset
 * revision, probes the MII bus and registers the netdev.  Resources are
 * unwound in reverse order on the error paths.  Returns 0 or -errno.
 */
static int sis900_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *pci_id)
{
	struct sis900_private *sis_priv;
	struct net_device *net_dev;
	struct pci_dev *dev;
	dma_addr_t ring_dma;
	void *ring_space;
	void __iomem *ioaddr;
	int i, ret;
	const char *card_name = card_names[pci_id->driver_data];
	const char *dev_name = pci_name(pci_dev);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* setup various bits in PCI command register (managed: no explicit
	 * disable needed on error paths) */
	ret = pcim_enable_device(pci_dev);
	if(ret) return ret;

	i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if(i){
		printk(KERN_ERR "sis900.c: architecture does not support "
			"32bit PCI busmaster DMA\n");
		return i;
	}

	pci_set_master(pci_dev);

	net_dev = alloc_etherdev(sizeof(struct sis900_private));
	if (!net_dev)
		return -ENOMEM;
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);

	/* We do a request_region() to register /proc/ioports info. */
	ret = pci_request_regions(pci_dev, "sis900");
	if (ret)
		goto err_out;

	/* map BAR0: the register window */
	ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!ioaddr) {
		ret = -ENOMEM;
		goto err_out;
	}

	sis_priv = netdev_priv(net_dev);
	sis_priv->ioaddr = ioaddr;
	sis_priv->pci_dev = pci_dev;
	spin_lock_init(&sis_priv->lock);

	sis_priv->eeprom_size = 24;

	pci_set_drvdata(pci_dev, net_dev);

	ring_space = dma_alloc_coherent(&pci_dev->dev, TX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_out_unmap;
	}
	sis_priv->tx_ring = ring_space;
	sis_priv->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pci_dev->dev, RX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_unmap_tx;
	}
	sis_priv->rx_ring = ring_space;
	sis_priv->rx_ring_dma = ring_dma;

	/* The SiS900-specific entries in the device structure. */
	net_dev->netdev_ops = &sis900_netdev_ops;
	net_dev->watchdog_timeo = TX_TIMEOUT;
	net_dev->ethtool_ops = &sis900_ethtool_ops;

	if (sis900_debug > 0)
		sis_priv->msg_enable = sis900_debug;
	else
		sis_priv->msg_enable = SIS900_DEF_MSG;

	sis_priv->mii_info.dev = net_dev;
	sis_priv->mii_info.mdio_read = mdio_read;
	sis_priv->mii_info.mdio_write = mdio_write;
	sis_priv->mii_info.phy_id_mask = 0x1f;
	sis_priv->mii_info.reg_num_mask = 0x1f;

	/* Get Mac address according to the chip revision */
	sis_priv->chipset_rev = pci_dev->revision;
	if(netif_msg_probe(sis_priv))
		printk(KERN_DEBUG "%s: detected revision %2.2x, "
				"trying to get MAC address...\n",
				dev_name, sis_priv->chipset_rev);

	ret = 0;
	if (sis_priv->chipset_rev == SIS630E_900_REV)
		ret = sis630e_get_mac_addr(pci_dev, net_dev);
	else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90) )
		ret = sis635_get_mac_addr(pci_dev, net_dev);
	else if (sis_priv->chipset_rev == SIS96x_900_REV)
		ret = sis96x_get_mac_addr(pci_dev, net_dev);
	else
		ret = sis900_get_mac_addr(pci_dev, net_dev);

	if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
		eth_hw_addr_random(net_dev);
		printk(KERN_WARNING "%s: Unreadable or invalid MAC address,"
				"using random generated one\n", dev_name);
	}

	/* 630ET : set the mii access mode as software-mode */
	if (sis_priv->chipset_rev == SIS630ET_900_REV)
		sw32(cr, ACCESSMODE | sr32(cr));

	/* probe for mii transceiver */
	if (sis900_mii_probe(net_dev) == 0) {
		printk(KERN_WARNING "%s: Error probing MII device.\n",
		       dev_name);
		ret = -ENODEV;
		goto err_unmap_rx;
	}

	/* save our host bridge revision (used by sis630_set_eq tuning) */
	dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
	if (dev) {
		sis_priv->host_bridge_rev = dev->revision;
		pci_dev_put(dev);
	}

	ret = register_netdev(net_dev);
	if (ret)
		goto err_unmap_rx;

	/* print some information about our NIC */
	printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
	       net_dev->name, card_name, ioaddr, pci_dev->irq,
	       net_dev->dev_addr);

	/* Detect Wake on Lan support: ret is reused as a scratch here,
	 * the function still returns 0 below */
	ret = (sr32(CFGPMC) & PMESP) >> 27;
	if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
		printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);

	return 0;

	/* error unwinding: free in reverse order of acquisition */
err_unmap_rx:
	dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
			  sis_priv->rx_ring_dma);
err_unmap_tx:
	dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
			  sis_priv->tx_ring_dma);
err_out_unmap:
	pci_iounmap(pci_dev, ioaddr);
err_out:
	free_netdev(net_dev);
	return ret;
}
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
/**
 * sis900_mii_probe - scan the MII bus and pick a default transceiver
 * @net_dev: the net device to probe
 *
 * Walks all 32 MII addresses, records every responding PHY on the
 * first_mii list, identifies each against mii_chip_table, then lets
 * sis900_default_phy() choose the one to use.  Applies per-PHY quirks
 * (SiS internal PHY reset, ICS1893 settings, 630E ANADV tuning) and
 * sets the initial carrier state.  Returns 1 on success, 0 if no PHY
 * responded or allocation failed, -ETIME if the link never settled
 * after a reset.
 */
static int sis900_mii_probe(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	const char *dev_name = pci_name(sis_priv->pci_dev);
	u16 poll_bit = MII_STAT_LINK, status = 0;
	unsigned long timeout = jiffies + 5 * HZ;
	int phy_addr;

	sis_priv->mii = NULL;

	/* search for total of 32 possible mii phy addresses */
	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
		struct mii_phy * mii_phy = NULL;
		u16 mii_status;
		int i;

		mii_phy = NULL;
		/* status bits are latched: read twice to get current state */
		for(i = 0; i < 2; i++)
			mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);

		if (mii_status == 0xffff || mii_status == 0x0000) {
			if (netif_msg_probe(sis_priv))
				printk(KERN_DEBUG "%s: MII at address %d"
						" not accessible\n",
						dev_name, phy_addr);
			continue;
		}

		if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
			/* out of memory: free everything found so far and bail */
			mii_phy = sis_priv->first_mii;
			while (mii_phy) {
				struct mii_phy *phy;
				phy = mii_phy;
				mii_phy = mii_phy->next;
				kfree(phy);
			}
			return 0;
		}

		mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
		mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
		mii_phy->phy_addr = phy_addr;
		mii_phy->status = mii_status;
		/* push onto the list; first_mii tracks the newest head too */
		mii_phy->next = sis_priv->mii;
		sis_priv->mii = mii_phy;
		sis_priv->first_mii = mii_phy;

		/* identify the PHY against the known-chip table */
		for (i = 0; mii_chip_table[i].phy_id1; i++)
			if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
			    ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)){
				mii_phy->phy_types = mii_chip_table[i].phy_types;
				if (mii_chip_table[i].phy_types == MIX)
					mii_phy->phy_types =
					    (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME;
				printk(KERN_INFO "%s: %s transceiver found "
							"at address %d.\n",
							dev_name,
							mii_chip_table[i].name,
							phy_addr);
				break;
			}

		if( !mii_chip_table[i].phy_id1 ) {
			printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
			       dev_name, phy_addr);
			mii_phy->phy_types = UNKNOWN;
		}
	}

	if (sis_priv->mii == NULL) {
		printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name);
		return 0;
	}

	/* select default PHY for mac */
	sis_priv->mii = NULL;
	sis900_default_phy( net_dev );

	/* Reset phy if default phy is internal sis900 */
	if ((sis_priv->mii->phy_id0 == 0x001D) &&
	    ((sis_priv->mii->phy_id1&0xFFF0) == 0x8000))
		status = sis900_reset_phy(net_dev, sis_priv->cur_phy);

	/* workaround for ICS1893 PHY */
	if ((sis_priv->mii->phy_id0 == 0x0015) &&
	    ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
		mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);

	/* if the link was up before the reset, wait (<= 5 s) for it to
	 * come back; poll_bit clears once MII_STAT_LINK reads as set */
	if(status & MII_STAT_LINK){
		while (poll_bit) {
			yield();

			poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
			if (time_after_eq(jiffies, timeout)) {
				printk(KERN_WARNING "%s: reset phy and link down now\n",
				       dev_name);
				return -ETIME;
			}
		}
	}

	if (sis_priv->chipset_rev == SIS630E_900_REV) {
		/* SiS 630E has some bugs on default value of PHY registers */
		mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
		mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
		mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
		mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
		/* (values above are chip-specific magic; do not alter) */
	}

	if (sis_priv->mii->status & MII_STAT_LINK)
		netif_carrier_on(net_dev);
	else
		netif_carrier_off(net_dev);

	return 1;
}
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
/**
 * sis900_default_phy - select the default PHY for the MAC
 * @net_dev: the net device to probe
 *
 * Preference order: the first known PHY with link up, else a HOME PHY,
 * else a LAN PHY, else simply the first PHY found.  Every PHY that is
 * not chosen is put into auto-negotiation + isolate mode; the chosen
 * one has its isolate bit cleared.  Returns the (twice-read, so
 * current) MII status of the selected PHY.
 */
static u16 sis900_default_phy(struct net_device * net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct mii_phy *phy = NULL, *phy_home = NULL,
		*default_phy = NULL, *phy_lan = NULL;
	u16 status;

	for (phy=sis_priv->first_mii; phy; phy=phy->next) {
		/* two reads: MII status bits are latched-low */
		status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
		status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);

		/* Link ON & Not select default PHY & not ghost PHY */
		if ((status & MII_STAT_LINK) && !default_phy &&
		    (phy->phy_types != UNKNOWN)) {
			default_phy = phy;
		} else {
			status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
			mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
				status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
			if (phy->phy_types == HOME)
				phy_home = phy;
			else if(phy->phy_types == LAN)
				phy_lan = phy;
		}
	}

	if (!default_phy && phy_home)
		default_phy = phy_home;
	else if (!default_phy && phy_lan)
		default_phy = phy_lan;
	else if (!default_phy)
		default_phy = sis_priv->first_mii;

	if (sis_priv->mii != default_phy) {
		sis_priv->mii = default_phy;
		sis_priv->cur_phy = default_phy->phy_addr;
		printk(KERN_INFO "%s: Using transceiver found at address %d as default\n",
		       pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
	}

	sis_priv->mii_info.phy_id = sis_priv->cur_phy;

	/* un-isolate the chosen PHY */
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
	status &= (~MII_CNTL_ISOLATE);

	mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);

	return status;
}
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794 static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
0795 {
0796 u16 cap;
0797
0798 mdio_read(net_dev, phy->phy_addr, MII_STATUS);
0799 mdio_read(net_dev, phy->phy_addr, MII_STATUS);
0800
0801 cap = MII_NWAY_CSMA_CD |
0802 ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
0803 ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
0804 ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
0805 ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
0806
0807 mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap);
0808 }
0809
0810
0811
0812 #define eeprom_delay() sr32(mear)
0813
0814
0815
0816
0817
0818
0819
0820
0821
0822
/**
 * read_eeprom - read one 16-bit word from the serial EEPROM
 * @ioaddr: mapped register window of the NIC
 * @location: word offset within the EEPROM
 *
 * Bit-bangs the EEPROM protocol through the mear register: clock out
 * the 9-bit read command (opcode + address) MSB first, then clock in
 * the 16 data bits.  Each register write is followed by a read of mear
 * (eeprom_delay) to pace the bus.  Returns the word read.
 */
static u16 read_eeprom(void __iomem *ioaddr, int location)
{
	u32 read_cmd = location | EEread;
	int i;
	u16 retval = 0;

	sw32(mear, 0);
	eeprom_delay();
	sw32(mear, EECS);
	eeprom_delay();

	/* Shift the read command (9 bits) out, MSB first */
	for (i = 8; i >= 0; i--) {
		u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;

		sw32(mear, dataval);
		eeprom_delay();
		sw32(mear, dataval | EECLK);
		eeprom_delay();
	}
	sw32(mear, EECS);
	eeprom_delay();

	/* read the 16-bits data in */
	for (i = 16; i > 0; i--) {
		sw32(mear, EECS);
		eeprom_delay();
		sw32(mear, EECS | EECLK);
		eeprom_delay();
		retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	sw32(mear, 0);
	eeprom_delay();

	return retval;
}
0862
0863
0864
0865
0866 #define mdio_delay() sr32(mear)
0867
/* Put the MDIO bus into the idle state: drive MDIO high and issue one
 * clock pulse.  (The sw32/mdio_delay macros require a local "ioaddr".) */
static void mdio_idle(struct sis900_private *sp)
{
	void __iomem *ioaddr = sp->ioaddr;

	sw32(mear, MDIO | MDDIR);
	mdio_delay();
	sw32(mear, MDIO | MDDIR | MDC);
}
0876
0877
0878 static void mdio_reset(struct sis900_private *sp)
0879 {
0880 void __iomem *ioaddr = sp->ioaddr;
0881 int i;
0882
0883 for (i = 31; i >= 0; i--) {
0884 sw32(mear, MDDIR | MDIO);
0885 mdio_delay();
0886 sw32(mear, MDDIR | MDIO | MDC);
0887 mdio_delay();
0888 }
0889 }
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
/**
 * mdio_read - read one MII management register
 * @net_dev: the net device whose registers are used
 * @phy_id: MII address of the target PHY
 * @location: register number to read
 *
 * Bit-bangs an MII read frame through the mear register: preamble
 * (mdio_reset), idle, the 16-bit command MSB first, then 16 data bits
 * clocked in.  Returns the register value.
 */
static int mdio_read(struct net_device *net_dev, int phy_id, int location)
{
	int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
	struct sis900_private *sp = netdev_priv(net_dev);
	void __iomem *ioaddr = sp->ioaddr;
	u16 retval = 0;
	int i;

	mdio_reset(sp);
	mdio_idle(sp);

	/* shift the read command out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw32(mear, dataval);
		mdio_delay();
		sw32(mear, dataval | MDC);
		mdio_delay();
	}

	/* Read the 16 data bits. */
	for (i = 16; i > 0; i--) {
		sw32(mear, 0);
		mdio_delay();
		retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
		sw32(mear, MDC);
		mdio_delay();
	}
	sw32(mear, 0x00);

	return retval;
}
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
/**
 * mdio_write - write one MII management register
 * @net_dev: the net device whose registers are used
 * @phy_id: MII address of the target PHY
 * @location: register number to write
 * @value: 16-bit value to write
 *
 * Bit-bangs an MII write frame: preamble, idle, 16-bit command, 16 data
 * bits, then two turnaround clocks.  NOTE(review): the command and
 * turnaround phases use sw8 while the data phase uses sw32 — presumably
 * intentional for this hardware; preserved exactly as-is.
 */
static void mdio_write(struct net_device *net_dev, int phy_id, int location,
			int value)
{
	int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
	struct sis900_private *sp = netdev_priv(net_dev);
	void __iomem *ioaddr = sp->ioaddr;
	int i;

	mdio_reset(sp);
	mdio_idle(sp);

	/* shift the command bits out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw8(mear, dataval);
		mdio_delay();
		sw8(mear, dataval | MDC);
		mdio_delay();
	}
	mdio_delay();

	/* shift the value bits out, MSB first */
	for (i = 15; i >= 0; i--) {
		int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;

		sw32(mear, dataval);
		mdio_delay();
		sw32(mear, dataval | MDC);
		mdio_delay();
	}
	mdio_delay();

	/* Clear out extra bits (turnaround). */
	for (i = 2; i > 0; i--) {
		sw8(mear, 0);
		mdio_delay();
		sw8(mear, MDC);
		mdio_delay();
	}
	sw32(mear, 0x00);
}
0989
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
1002 {
1003 int i;
1004 u16 status;
1005
1006 for (i = 0; i < 2; i++)
1007 status = mdio_read(net_dev, phy_addr, MII_STATUS);
1008
1009 mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
1010
1011 return status;
1012 }
1013
1014 #ifdef CONFIG_NET_POLL_CONTROLLER
1015
1016
1017
1018
1019
1020 static void sis900_poll(struct net_device *dev)
1021 {
1022 struct sis900_private *sp = netdev_priv(dev);
1023 const int irq = sp->pci_dev->irq;
1024
1025 disable_irq(irq);
1026 sis900_interrupt(irq, dev);
1027 enable_irq(irq);
1028 }
1029 #endif
1030
1031
1032
1033
1034
1035
1036
1037
1038
/**
 * sis900_open - bring the interface up (ndo_open)
 * @net_dev: the net device to open
 *
 * Resets the chip, applies the SiS 630 equalizer workaround, requests
 * the (shared) IRQ, initializes the receive filter and both descriptor
 * rings, programs the RX mode, enables the receiver and interrupts,
 * and arms the 1-second link-monitor timer.  Returns 0 or the
 * request_irq() error.
 */
static int
sis900_open(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int ret;

	/* Soft reset the chip. */
	sis900_reset(net_dev);

	/* Equalizer workaround Rule */
	sis630_set_eq(net_dev, sis_priv->chipset_rev);

	ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (ret)
		return ret;

	sis900_init_rxfilter(net_dev);

	sis900_init_tx_ring(net_dev);
	sis900_init_rx_ring(net_dev);

	set_rx_mode(net_dev);

	netif_start_queue(net_dev);

	/* Workaround for EDB: start at a safe 10 Mbps half-duplex mode
	 * until link negotiation settles */
	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);

	/* Enable all known interrupts by setting the interrupt mask. */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
	sw32(cr, RxENA | sr32(cr));
	sw32(ier, IE);

	sis900_check_mode(net_dev, sis_priv->mii);

	/* Set the timer to switch to check for link beat and perhaps switch
	 * to an alternate media type. */
	timer_setup(&sis_priv->timer, sis900_timer, 0);
	sis_priv->timer.expires = jiffies + HZ;
	add_timer(&sis_priv->timer);

	return 0;
}
1084
1085
1086
1087
1088
1089
1090
1091
1092
/**
 * sis900_init_rxfilter - program the unicast receive filter
 * @net_dev: the net device whose MAC address is loaded
 *
 * Loads the device's MAC address (three 16-bit words) into the receive
 * filter address registers, with the filter disabled during the update
 * and the original enable state (plus RFEN) restored afterwards.
 */
static void
sis900_init_rxfilter (struct net_device * net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 rfcrSave;
	u32 i;

	rfcrSave = sr32(rfcr);

	/* disable packet filtering before setting filter */
	sw32(rfcr, rfcrSave & ~RFEN);

	/* load MAC addr to filter data register */
	for (i = 0 ; i < 3 ; i++) {
		u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i);

		sw32(rfcr, i << RFADDR_shift);
		sw32(rfdr, w);

		if (netif_msg_hw(sis_priv)) {
			printk(KERN_DEBUG "%s: Receive Filter Address[%d]=%x\n",
			       net_dev->name, i, sr32(rfdr));
		}
	}

	/* enable packet filtering */
	sw32(rfcr, rfcrSave | RFEN);
}
1122
1123
1124
1125
1126
1127
1128
1129
/**
 * sis900_init_tx_ring - initialize the TX descriptor ring
 * @net_dev: the net device whose ring is set up
 *
 * Clears all TX bookkeeping, links each descriptor's "link" field to
 * the bus address of the next descriptor (wrapping to form a circular
 * ring), and loads the ring's base bus address into the txdp register.
 */
static void
sis900_init_tx_ring(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int i;

	sis_priv->tx_full = 0;
	sis_priv->dirty_tx = sis_priv->cur_tx = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		sis_priv->tx_skbuff[i] = NULL;

		/* circular link: descriptor i points at descriptor i+1 mod N */
		sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
			((i+1)%NUM_TX_DESC)*sizeof(BufferDesc);
		sis_priv->tx_ring[i].cmdsts = 0;
		sis_priv->tx_ring[i].bufptr = 0;
	}

	/* load Transmit Descriptor Register */
	sw32(txdp, sis_priv->tx_ring_dma);
	if (netif_msg_hw(sis_priv))
		printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
		       net_dev->name, sr32(txdp));
}
1155
1156
1157
1158
1159
1160
1161
1162
1163
/**
 * sis900_init_rx_ring - initialize the RX descriptor ring
 * @net_dev: the net device whose ring is set up
 *
 * Links the RX descriptors into a circular ring, then allocates and
 * DMA-maps one receive skb per descriptor.  On allocation or mapping
 * failure the loop stops early; dirty_rx is set to i - NUM_RX_DESC
 * (an intentional unsigned underflow) so the refill logic knows how
 * many buffers are missing.  Finally loads rxdp with the ring base.
 */
static void
sis900_init_rx_ring(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int i;

	sis_priv->cur_rx = 0;
	sis_priv->dirty_rx = 0;

	/* init RX descriptor */
	for (i = 0; i < NUM_RX_DESC; i++) {
		sis_priv->rx_skbuff[i] = NULL;

		/* circular link: descriptor i points at descriptor i+1 mod N */
		sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma +
			((i+1)%NUM_RX_DESC)*sizeof(BufferDesc);
		sis_priv->rx_ring[i].cmdsts = 0;
		sis_priv->rx_ring[i].bufptr = 0;
	}

	/* allocate sock buffers */
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct sk_buff *skb;

		if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
			/* not enough memory for skbuff, this makes a
			 * "hole" on the buffer ring, it is not clear
			 * how the hardware will react to this kind of
			 * degenerated buffer */
			break;
		}
		sis_priv->rx_skbuff[i] = skb;
		sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
		sis_priv->rx_ring[i].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
							     skb->data,
							     RX_BUF_SIZE,
							     DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
					       sis_priv->rx_ring[i].bufptr))) {
			dev_kfree_skb(skb);
			sis_priv->rx_skbuff[i] = NULL;
			break;
		}
	}
	/* intentional underflow when i < NUM_RX_DESC: records the shortfall */
	sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);

	/* load Receive Descriptor Register */
	sw32(rxdp, sis_priv->rx_ring_dma);
	if (netif_msg_hw(sis_priv))
		printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
		       net_dev->name, sr32(rxdp));
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
/**
 * sis630_set_eq - SiS 630 embedded-PHY equalizer workaround
 * @net_dev: the net device to tune
 * @revision: NIC chipset revision; only 630 A/E/EA1/ET variants apply
 *
 * When the link is up: forces equalizer auto-calibration, samples the
 * calibrated value ten times from MII_RESV, derives a corrected value
 * from the min/max (formula depends on chip and host-bridge revision),
 * and writes it back with auto-calibration disabled.  When the link is
 * down: re-enables auto-calibration so the next link-up recalibrates.
 * All constants are chip-specific magic; do not "simplify" them.
 */
static void sis630_set_eq(struct net_device *net_dev, u8 revision)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	u16 reg14h, eq_value=0, max_value=0, min_value=0;
	int i, maxcount=10;

	if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
	       revision == SIS630A_900_REV || revision == SIS630ET_900_REV) )
		return;

	if (netif_carrier_ok(net_dev)) {
		/* trigger auto-calibration, then sample the result 10 times */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
					(0x2200 | reg14h) & 0xBFFF);
		for (i=0; i < maxcount; i++) {
			eq_value = (0x00F8 & mdio_read(net_dev,
					sis_priv->cur_phy, MII_RESV)) >> 3;
			if (i == 0)
				max_value=min_value=eq_value;
			max_value = (eq_value > max_value) ?
						eq_value : max_value;
			min_value = (eq_value < min_value) ?
						eq_value : min_value;
		}
		/* 630E/EA1/ET correction formula */
		if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
		    revision == SIS630ET_900_REV) {
			if (max_value < 5)
				eq_value = max_value;
			else if (max_value >= 5 && max_value < 15)
				eq_value = (max_value == min_value) ?
						max_value+2 : max_value+1;
			else if (max_value >= 15)
				eq_value=(max_value == min_value) ?
						max_value+6 : max_value+5;
		}
		/* 630A with B0/B1 host bridge: use the min/max average */
		if (revision == SIS630A_900_REV &&
		    (sis_priv->host_bridge_rev == SIS630B0 ||
		     sis_priv->host_bridge_rev == SIS630B1)) {
			if (max_value == 0)
				eq_value = 3;
			else
				eq_value = (max_value + min_value + 1)/2;
		}
		/* write the corrected value, auto-calibration off */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
		reg14h = (reg14h | 0x6000) & 0xFDFF;
		mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
	} else {
		/* link down: re-enable auto-calibration for the next link-up */
		reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
		if (revision == SIS630A_900_REV &&
		    (sis_priv->host_bridge_rev == SIS630B0 ||
		     sis_priv->host_bridge_rev == SIS630B1))
			mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
						(reg14h | 0x2200) & 0xBFFF);
		else
			mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
						(reg14h | 0x2000) & 0xBFFF);
	}
}
1306
1307
1308
1309
1310
1311
1312
1313
1314
/**
 * sis900_timer - periodic link monitor (fires every 5 s after the first tick)
 * @t: the timer embedded in sis900_private
 *
 * Polls the current PHY's (latched, hence twice-read) status.  On a
 * link-down -> link-up transition it re-selects the default PHY, reads
 * the negotiated speed/duplex and programs the MAC.  On link-up ->
 * link-down it marks carrier off, resets the internal SiS PHY if in
 * use, reapplies the 630 equalizer workaround and immediately retries
 * link detection (goto LookForLink).  Re-arms itself at the end.
 */
static void sis900_timer(struct timer_list *t)
{
	struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
	struct net_device *net_dev = sis_priv->mii_info.dev;
	struct mii_phy *mii_phy = sis_priv->mii;
	static const int next_tick = 5*HZ;
	int speed = 0, duplex = 0;
	u16 status;

	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
	status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);

	/* Link OFF -> ON */
	if (!netif_carrier_ok(net_dev)) {
	LookForLink:
		/* Search for new PHY */
		status = sis900_default_phy(net_dev);
		mii_phy = sis_priv->mii;

		if (status & MII_STAT_LINK) {
			WARN_ON(!(status & MII_STAT_AUTO_DONE));

			sis900_read_mode(net_dev, &speed, &duplex);
			if (duplex) {
				sis900_set_mode(sis_priv, speed, duplex);
				sis630_set_eq(net_dev, sis_priv->chipset_rev);
				netif_carrier_on(net_dev);
			}
		}
	} else {
	/* Link ON -> OFF */
		if (!(status & MII_STAT_LINK)){
			netif_carrier_off(net_dev);
			if(netif_msg_link(sis_priv))
				printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);

			/* Change mode issue will cause the internal SiS PHY
			 * to hang; reset it as a workaround */
			if ((mii_phy->phy_id0 == 0x001D) &&
			    ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
				sis900_reset_phy(net_dev, sis_priv->cur_phy);

			sis630_set_eq(net_dev, sis_priv->chipset_rev);

			goto LookForLink;
		}
	}

	sis_priv->timer.expires = jiffies + next_tick;
	add_timer(&sis_priv->timer);
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
/**
 * sis900_check_mode - check the media mode for sis900
 * @net_dev: the net device to be checked
 * @mii_phy: the mii phy currently in use
 *
 * For a LAN-type PHY we advertise our capabilities and kick off
 * auto-negotiation; its completion is handled elsewhere. For the
 * non-LAN (HOME) type there is nothing to negotiate: force HOME
 * speed / half duplex and mark negotiation complete immediately.
 */
static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int speed, duplex;

	if (mii_phy->phy_types == LAN) {
		/* clear the EXD bit in the configuration register */
		sw32(cfg, ~EXD & sr32(cfg));
		sis900_set_capability(net_dev , mii_phy);
		sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
	} else {
		/* set EXD and force the fixed HOME mode */
		sw32(cfg, EXD | sr32(cfg));
		speed = HW_SPEED_HOME;
		duplex = FDX_CAPABLE_HALF_SELECTED;
		sis900_set_mode(sis_priv, speed, duplex);
		sis_priv->autong_complete = 1;
	}
}
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410 static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
1411 {
1412 void __iomem *ioaddr = sp->ioaddr;
1413 u32 tx_flags = 0, rx_flags = 0;
1414
1415 if (sr32( cfg) & EDB_MASTER_EN) {
1416 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
1417 (TX_FILL_THRESH << TxFILLT_shift);
1418 rx_flags = DMA_BURST_64 << RxMXDMA_shift;
1419 } else {
1420 tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
1421 (TX_FILL_THRESH << TxFILLT_shift);
1422 rx_flags = DMA_BURST_512 << RxMXDMA_shift;
1423 }
1424
1425 if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) {
1426 rx_flags |= (RxDRNT_10 << RxDRNT_shift);
1427 tx_flags |= (TxDRNT_10 << TxDRNT_shift);
1428 } else {
1429 rx_flags |= (RxDRNT_100 << RxDRNT_shift);
1430 tx_flags |= (TxDRNT_100 << TxDRNT_shift);
1431 }
1432
1433 if (duplex == FDX_CAPABLE_FULL_SELECTED) {
1434 tx_flags |= (TxCSI | TxHBI);
1435 rx_flags |= RxATX;
1436 }
1437
1438 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1439
1440 rx_flags |= RxAJAB;
1441 #endif
1442
1443 sw32(txcfg, tx_flags);
1444 sw32(rxcfg, rx_flags);
1445 }
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458 static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
1459 {
1460 struct sis900_private *sis_priv = netdev_priv(net_dev);
1461 int i = 0;
1462 u32 status;
1463
1464 for (i = 0; i < 2; i++)
1465 status = mdio_read(net_dev, phy_addr, MII_STATUS);
1466
1467 if (!(status & MII_STAT_LINK)){
1468 if(netif_msg_link(sis_priv))
1469 printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
1470 sis_priv->autong_complete = 1;
1471 netif_carrier_off(net_dev);
1472 return;
1473 }
1474
1475
1476 mdio_write(net_dev, phy_addr, MII_CONTROL,
1477 MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
1478 sis_priv->autong_complete = 0;
1479 }
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
/**
 * sis900_read_mode - read the resolved media mode
 * @net_dev: the net device to read mode for
 * @speed: out: negotiated speed (HW_SPEED_*)
 * @duplex: out: negotiated duplex (FDX_CAPABLE_*)
 *
 * The resolved mode is the intersection of what we advertised
 * (MII_ANADV) and what the partner advertised (MII_ANLPAR).
 * @speed and @duplex are left untouched when the link is down.
 * Sets autong_complete once a mode has been resolved.
 */
static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct mii_phy *phy = sis_priv->mii;
	int phy_addr = sis_priv->cur_phy;
	u32 status;
	u16 autoadv, autorec;
	int i;

	/* latched status register: read twice for the live link state */
	for (i = 0; i < 2; i++)
		status = mdio_read(net_dev, phy_addr, MII_STATUS);

	if (!(status & MII_STAT_LINK))
		return;

	/* common ability mask of the two link partners */
	autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
	autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
	status = autoadv & autorec;

	/* defaults: 10 Mbps, half duplex; upgraded by the bits below */
	*speed = HW_SPEED_10_MBPS;
	*duplex = FDX_CAPABLE_HALF_SELECTED;

	if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
		*speed = HW_SPEED_100_MBPS;
	if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
		*duplex = FDX_CAPABLE_FULL_SELECTED;

	sis_priv->autong_complete = 1;

	/* Quirk for PHYs with id0 0x0000 / id1 0x82xx: trust the control
	 * register and vendor register 0x19 for the resolved mode instead
	 * (NOTE(review): vendor unidentified from here -- confirm) */
	if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
		if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
			*duplex = FDX_CAPABLE_FULL_SELECTED;
		if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
			*speed = HW_SPEED_100_MBPS;
	}

	if(netif_msg_link(sis_priv))
		printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
		       net_dev->name,
		       *speed == HW_SPEED_100_MBPS ?
		       "100mbps" : "10mbps",
		       *duplex == FDX_CAPABLE_FULL_SELECTED ?
		       "full" : "half");
}
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
/**
 * sis900_tx_timeout - transmit watchdog timeout handler
 * @net_dev: the net device that timed out
 * @txqueue: index of the hanging queue (unused: single-queue device)
 *
 * Print the chip status, discard every queued but unsent packet,
 * reset the Tx ring indices and restart the transmitter.
 */
static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	unsigned long flags;
	int i;

	if (netif_msg_tx_err(sis_priv)) {
		printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
		       net_dev->name, sr32(cr), sr32(isr));
	}

	/* disable interrupts while we rip the ring apart */
	sw32(imr, 0x0000);

	/* keep the interrupt handler away from the buffer ring */
	spin_lock_irqsave(&sis_priv->lock, flags);

	/* discard unsent packets and reset the ring indices */
	sis_priv->dirty_tx = sis_priv->cur_tx = 0;
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = sis_priv->tx_skbuff[i];

		if (skb) {
			dma_unmap_single(&sis_priv->pci_dev->dev,
					 sis_priv->tx_ring[i].bufptr,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_irq(skb);
			sis_priv->tx_skbuff[i] = NULL;
			sis_priv->tx_ring[i].cmdsts = 0;
			sis_priv->tx_ring[i].bufptr = 0;
			net_dev->stats.tx_dropped++;
		}
	}
	sis_priv->tx_full = 0;
	netif_wake_queue(net_dev);

	spin_unlock_irqrestore(&sis_priv->lock, flags);

	netif_trans_update(net_dev); /* prevent an immediate re-timeout */

	/* point the hardware back at the (now empty) descriptor ring */
	sw32(txdp, sis_priv->tx_ring_dma);

	/* re-enable all interrupts we normally service */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
}
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607 static netdev_tx_t
1608 sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1609 {
1610 struct sis900_private *sis_priv = netdev_priv(net_dev);
1611 void __iomem *ioaddr = sis_priv->ioaddr;
1612 unsigned int entry;
1613 unsigned long flags;
1614 unsigned int index_cur_tx, index_dirty_tx;
1615 unsigned int count_dirty_tx;
1616
1617 spin_lock_irqsave(&sis_priv->lock, flags);
1618
1619
1620 entry = sis_priv->cur_tx % NUM_TX_DESC;
1621 sis_priv->tx_skbuff[entry] = skb;
1622
1623
1624 sis_priv->tx_ring[entry].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
1625 skb->data, skb->len,
1626 DMA_TO_DEVICE);
1627 if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
1628 sis_priv->tx_ring[entry].bufptr))) {
1629 dev_kfree_skb_any(skb);
1630 sis_priv->tx_skbuff[entry] = NULL;
1631 net_dev->stats.tx_dropped++;
1632 spin_unlock_irqrestore(&sis_priv->lock, flags);
1633 return NETDEV_TX_OK;
1634 }
1635 sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
1636 sw32(cr, TxENA | sr32(cr));
1637
1638 sis_priv->cur_tx ++;
1639 index_cur_tx = sis_priv->cur_tx;
1640 index_dirty_tx = sis_priv->dirty_tx;
1641
1642 for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++)
1643 count_dirty_tx ++;
1644
1645 if (index_cur_tx == index_dirty_tx) {
1646
1647 sis_priv->tx_full = 1;
1648 netif_stop_queue(net_dev);
1649 } else if (count_dirty_tx < NUM_TX_DESC) {
1650
1651 netif_start_queue(net_dev);
1652 } else {
1653
1654 sis_priv->tx_full = 1;
1655 netif_stop_queue(net_dev);
1656 }
1657
1658 spin_unlock_irqrestore(&sis_priv->lock, flags);
1659
1660 if (netif_msg_tx_queued(sis_priv))
1661 printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
1662 "to slot %d.\n",
1663 net_dev->name, skb->data, (int)skb->len, entry);
1664
1665 return NETDEV_TX_OK;
1666 }
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
/**
 * sis900_interrupt - sis900 interrupt handler
 * @irq: the irq number
 * @dev_instance: the client data object (our net_device)
 *
 * Services Rx events (sis900_rx), reclaims completed Tx descriptors
 * (sis900_finish_xmit) and bails out on hardware error or when
 * max_interrupt_work iterations are exceeded.
 */
static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
{
	struct net_device *net_dev = dev_instance;
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	int boguscnt = max_interrupt_work;
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 status;
	unsigned int handled = 0;

	spin_lock (&sis_priv->lock);

	do {
		/* reading ISR also acknowledges the reported events
		 * (assumed read-to-clear -- consistent with the re-read
		 * in the exit message below) */
		status = sr32(isr);

		if ((status & (HIBERR|TxURN|TxERR|TxDESC|RxORN|RxERR|RxOK)) == 0)
			/* nothing of interest pending: done */
			break;
		handled = 1;

		/* both directions may be pending at once, so no early exit */
		if (status & (RxORN | RxERR | RxOK))
			/* Rx events: receive packets, refill the ring */
			sis900_rx(net_dev);

		if (status & (TxURN | TxERR | TxDESC))
			/* Tx events: reclaim finished descriptors */
			sis900_finish_xmit(net_dev);

		/* hardware-level error: give up for this invocation */
		if (status & HIBERR) {
			if(netif_msg_intr(sis_priv))
				printk(KERN_INFO "%s: Abnormal interrupt, "
				       "status %#8.8x.\n", net_dev->name, status);
			break;
		}
		/* bound the work done in one interrupt */
		if (--boguscnt < 0) {
			if(netif_msg_intr(sis_priv))
				printk(KERN_INFO "%s: Too much work at interrupt, "
				       "interrupt status = %#8.8x.\n",
				       net_dev->name, status);
			break;
		}
	} while (1);

	if(netif_msg_intr(sis_priv))
		printk(KERN_DEBUG "%s: exiting interrupt, "
		       "interrupt status = %#8.8x\n",
		       net_dev->name, sr32(isr));

	spin_unlock (&sis_priv->lock);
	return IRQ_RETVAL(handled);
}
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
/**
 * sis900_rx - sis900 receive routine
 * @net_dev: the net device which receives data
 *
 * Walk the Rx ring processing every descriptor the hardware has
 * filled (OWN set), hand good packets to the network stack, count
 * errors, and refill the ring with fresh buffers. Finally re-enable
 * the receiver. Called from interrupt context. Always returns 0.
 */
static int sis900_rx(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
	u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
	int rx_work_limit;

	if (netif_msg_rx_status(sis_priv))
		printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
		       "status:0x%8.8x\n",
		       sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
	/* process at most one full ring's worth per call */
	rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;

	/* OWN set by hardware = descriptor holds a received frame */
	while (rx_status & OWN) {
		unsigned int rx_size;
		unsigned int data_size;

		if (--rx_work_limit < 0)
			break;

		data_size = rx_status & DSIZE;
		rx_size = data_size - CRC_SIZE;	/* strip trailing FCS */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
		/* VLAN frames trip TOOLONG although they still fit:
		 * clear the flag so they are accepted below */
		if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
			rx_status &= (~ ((unsigned int)TOOLONG));
#endif

		if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
			/* corrupted packet: account the error and give the
			 * descriptor (with its existing buffer) back to hw */
			if (netif_msg_rx_err(sis_priv))
				printk(KERN_DEBUG "%s: Corrupted packet "
				       "received, buffer status = 0x%8.8x/%d.\n",
				       net_dev->name, rx_status, data_size);
			net_dev->stats.rx_errors++;
			if (rx_status & OVERRUN)
				net_dev->stats.rx_over_errors++;
			if (rx_status & (TOOLONG|RUNT))
				net_dev->stats.rx_length_errors++;
			if (rx_status & (RXISERR | FAERR))
				net_dev->stats.rx_frame_errors++;
			if (rx_status & CRCERR)
				net_dev->stats.rx_crc_errors++;
			/* reset cmdsts: clears OWN, rearms the buffer size */
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
		} else {
			struct sk_buff * skb;
			struct sk_buff * rx_skb;

			dma_unmap_single(&sis_priv->pci_dev->dev,
					 sis_priv->rx_ring[entry].bufptr,
					 RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* allocate the replacement buffer first; if that
			 * fails, drop the frame and recycle the old buffer
			 * rather than leaving a hole in the ring */
			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
				skb = sis_priv->rx_skbuff[entry];
				net_dev->stats.rx_dropped++;
				goto refill_rx_ring;
			}

			/* a NULL slot here means the ring is inconsistent;
			 * free the fresh skb and stop processing */
			if (sis_priv->rx_skbuff[entry] == NULL) {
				if (netif_msg_rx_err(sis_priv))
					printk(KERN_WARNING "%s: NULL pointer "
					       "encountered in Rx ring\n"
					       "cur_rx:%4.4d, dirty_rx:%4.4d\n",
					       net_dev->name, sis_priv->cur_rx,
					       sis_priv->dirty_rx);
				dev_kfree_skb(skb);
				break;
			}

			/* hand the received frame to the stack */
			rx_skb = sis_priv->rx_skbuff[entry];
			skb_put(rx_skb, rx_size);
			rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
			netif_rx(rx_skb);

			/* update statistics */
			if ((rx_status & BCAST) == MCAST)
				net_dev->stats.multicast++;
			net_dev->stats.rx_bytes += rx_size;
			net_dev->stats.rx_packets++;
			sis_priv->dirty_rx++;
refill_rx_ring:
			/* install (new or recycled) skb and remap for DMA */
			sis_priv->rx_skbuff[entry] = skb;
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
			sis_priv->rx_ring[entry].bufptr =
				dma_map_single(&sis_priv->pci_dev->dev,
					       skb->data, RX_BUF_SIZE,
					       DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
						       sis_priv->rx_ring[entry].bufptr))) {
				dev_kfree_skb_irq(skb);
				sis_priv->rx_skbuff[entry] = NULL;
				break;
			}
		}
		sis_priv->cur_rx++;
		entry = sis_priv->cur_rx % NUM_RX_DESC;
		rx_status = sis_priv->rx_ring[entry].cmdsts;
	}

	/* refill any slots that were dropped earlier (e.g. after a
	 * failed allocation on a previous pass) */
	for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
		struct sk_buff *skb;

		entry = sis_priv->dirty_rx % NUM_RX_DESC;

		if (sis_priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
			if (skb == NULL) {
				/* still out of memory: leave the hole and
				 * retry on the next interrupt */
				net_dev->stats.rx_dropped++;
				break;
			}
			sis_priv->rx_skbuff[entry] = skb;
			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
			sis_priv->rx_ring[entry].bufptr =
				dma_map_single(&sis_priv->pci_dev->dev,
					       skb->data, RX_BUF_SIZE,
					       DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
						       sis_priv->rx_ring[entry].bufptr))) {
				dev_kfree_skb_irq(skb);
				sis_priv->rx_skbuff[entry] = NULL;
				break;
			}
		}
	}
	/* re-enable the potentially idle receive state machine */
	sw32(cr , RxENA | sr32(cr));

	return 0;
}
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
/**
 * sis900_finish_xmit - reclaim completed Tx descriptors
 * @net_dev: the net device that was transmitting
 *
 * Walk the Tx ring from dirty_tx to cur_tx, account errors or
 * successful transmissions, unmap and free the skbs, and wake the
 * queue when enough room has been reclaimed. Called from the
 * interrupt handler, so keep the work bounded.
 */
static void sis900_finish_xmit (struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);

	for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
		struct sk_buff *skb;
		unsigned int entry;
		u32 tx_status;

		entry = sis_priv->dirty_tx % NUM_TX_DESC;
		tx_status = sis_priv->tx_ring[entry].cmdsts;

		if (tx_status & OWN) {
			/* hardware still owns this descriptor: the packet
			 * is not transmitted yet, stop reclaiming here */
			break;
		}

		if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
			/* packet unsuccessfully transmitted */
			if (netif_msg_tx_err(sis_priv))
				printk(KERN_DEBUG "%s: Transmit "
				       "error, Tx status %8.8x.\n",
				       net_dev->name, tx_status);
			net_dev->stats.tx_errors++;
			if (tx_status & UNDERRUN)
				net_dev->stats.tx_fifo_errors++;
			if (tx_status & ABORT)
				net_dev->stats.tx_aborted_errors++;
			if (tx_status & NOCARRIER)
				net_dev->stats.tx_carrier_errors++;
			if (tx_status & OWCOLL)
				net_dev->stats.tx_window_errors++;
		} else {
			/* packet successfully transmitted: collision count
			 * sits in bits 16+ of the status word */
			net_dev->stats.collisions += (tx_status & COLCNT) >> 16;
			net_dev->stats.tx_bytes += tx_status & DSIZE;
			net_dev->stats.tx_packets++;
		}
		/* unmap and free the transmitted skb, clear the slot */
		skb = sis_priv->tx_skbuff[entry];
		dma_unmap_single(&sis_priv->pci_dev->dev,
				 sis_priv->tx_ring[entry].bufptr, skb->len,
				 DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		sis_priv->tx_skbuff[entry] = NULL;
		sis_priv->tx_ring[entry].bufptr = 0;
		sis_priv->tx_ring[entry].cmdsts = 0;
	}

	if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
	    sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
		/* the ring was full but now has headroom again:
		 * let the stack queue more packets */
		sis_priv->tx_full = 0;
		netif_wake_queue (net_dev);
	}
}
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969 static int sis900_close(struct net_device *net_dev)
1970 {
1971 struct sis900_private *sis_priv = netdev_priv(net_dev);
1972 struct pci_dev *pdev = sis_priv->pci_dev;
1973 void __iomem *ioaddr = sis_priv->ioaddr;
1974 struct sk_buff *skb;
1975 int i;
1976
1977 netif_stop_queue(net_dev);
1978
1979
1980 sw32(imr, 0x0000);
1981 sw32(ier, 0x0000);
1982
1983
1984 sw32(cr, RxDIS | TxDIS | sr32(cr));
1985
1986 del_timer(&sis_priv->timer);
1987
1988 free_irq(pdev->irq, net_dev);
1989
1990
1991 for (i = 0; i < NUM_RX_DESC; i++) {
1992 skb = sis_priv->rx_skbuff[i];
1993 if (skb) {
1994 dma_unmap_single(&pdev->dev,
1995 sis_priv->rx_ring[i].bufptr,
1996 RX_BUF_SIZE, DMA_FROM_DEVICE);
1997 dev_kfree_skb(skb);
1998 sis_priv->rx_skbuff[i] = NULL;
1999 }
2000 }
2001 for (i = 0; i < NUM_TX_DESC; i++) {
2002 skb = sis_priv->tx_skbuff[i];
2003 if (skb) {
2004 dma_unmap_single(&pdev->dev,
2005 sis_priv->tx_ring[i].bufptr,
2006 skb->len, DMA_TO_DEVICE);
2007 dev_kfree_skb(skb);
2008 sis_priv->tx_skbuff[i] = NULL;
2009 }
2010 }
2011
2012
2013
2014 return 0;
2015 }
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025 static void sis900_get_drvinfo(struct net_device *net_dev,
2026 struct ethtool_drvinfo *info)
2027 {
2028 struct sis900_private *sis_priv = netdev_priv(net_dev);
2029
2030 strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
2031 strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
2032 strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
2033 sizeof(info->bus_info));
2034 }
2035
2036 static u32 sis900_get_msglevel(struct net_device *net_dev)
2037 {
2038 struct sis900_private *sis_priv = netdev_priv(net_dev);
2039 return sis_priv->msg_enable;
2040 }
2041
2042 static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
2043 {
2044 struct sis900_private *sis_priv = netdev_priv(net_dev);
2045 sis_priv->msg_enable = value;
2046 }
2047
2048 static u32 sis900_get_link(struct net_device *net_dev)
2049 {
2050 struct sis900_private *sis_priv = netdev_priv(net_dev);
2051 return mii_link_ok(&sis_priv->mii_info);
2052 }
2053
2054 static int sis900_get_link_ksettings(struct net_device *net_dev,
2055 struct ethtool_link_ksettings *cmd)
2056 {
2057 struct sis900_private *sis_priv = netdev_priv(net_dev);
2058 spin_lock_irq(&sis_priv->lock);
2059 mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
2060 spin_unlock_irq(&sis_priv->lock);
2061 return 0;
2062 }
2063
2064 static int sis900_set_link_ksettings(struct net_device *net_dev,
2065 const struct ethtool_link_ksettings *cmd)
2066 {
2067 struct sis900_private *sis_priv = netdev_priv(net_dev);
2068 int rt;
2069 spin_lock_irq(&sis_priv->lock);
2070 rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
2071 spin_unlock_irq(&sis_priv->lock);
2072 return rt;
2073 }
2074
2075 static int sis900_nway_reset(struct net_device *net_dev)
2076 {
2077 struct sis900_private *sis_priv = netdev_priv(net_dev);
2078 return mii_nway_restart(&sis_priv->mii_info);
2079 }
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
/**
 * sis900_set_wol - configure Wake-on-LAN
 * @net_dev: the net device to configure
 * @wol: requested WoL options from ethtool
 *
 * Only WAKE_MAGIC (magic packet) and WAKE_PHY (link change) are
 * supported; requesting anything else returns -EINVAL. A wolopts
 * of 0 disables wake-up entirely. Wake sources are programmed in
 * the chip's pmctrl register, and PME# generation is armed or
 * disarmed via PME_EN in the PCI PM control/status register.
 */
static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 cfgpmcsr = 0, pmctrl_bits = 0;

	if (wol->wolopts == 0) {
		/* disable: clear PME_EN and every wake source (0 pmctrl) */
		pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
		cfgpmcsr &= ~PME_EN;
		pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
		sw32(pmctrl, pmctrl_bits);
		if (netif_msg_wol(sis_priv))
			printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
		return 0;
	}

	/* reject wake sources the hardware cannot deliver */
	if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
			    | WAKE_BCAST | WAKE_ARP))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC)
		pmctrl_bits |= MAGICPKT;
	if (wol->wolopts & WAKE_PHY)
		pmctrl_bits |= LINKON;

	/* program the selected wake sources */
	sw32(pmctrl, pmctrl_bits);

	/* arm PME# generation in the PCI power-management block */
	pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
	cfgpmcsr |= PME_EN;
	pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
	if (netif_msg_wol(sis_priv))
		printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);

	return 0;
}
2127
2128 static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2129 {
2130 struct sis900_private *sp = netdev_priv(net_dev);
2131 void __iomem *ioaddr = sp->ioaddr;
2132 u32 pmctrl_bits;
2133
2134 pmctrl_bits = sr32(pmctrl);
2135 if (pmctrl_bits & MAGICPKT)
2136 wol->wolopts |= WAKE_MAGIC;
2137 if (pmctrl_bits & LINKON)
2138 wol->wolopts |= WAKE_PHY;
2139
2140 wol->supported = (WAKE_PHY | WAKE_MAGIC);
2141 }
2142
2143 static int sis900_get_eeprom_len(struct net_device *dev)
2144 {
2145 struct sis900_private *sis_priv = netdev_priv(dev);
2146
2147 return sis_priv->eeprom_size;
2148 }
2149
/**
 * sis900_read_eeprom - copy the whole EEPROM into @buf
 * @net_dev: the net device owning the EEPROM
 * @buf: destination buffer, sis_priv->eeprom_size bytes, filled as
 *       16-bit words
 *
 * On SiS96x revisions the EEPROM is arbitrated through the MEAR
 * register: request access (EEREQ), poll up to ~2ms for the grant
 * (EEGNT), and always signal completion (EEDONE) afterwards.
 * Other revisions read directly, after sanity-checking a signature
 * word against the all-ones / all-zeroes "blank or absent" cases.
 * Returns 0 on success, -EAGAIN otherwise.
 */
static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int wait, ret = -EAGAIN;
	u16 signature;
	u16 *ebuf = (u16 *)buf;
	int i;

	if (sis_priv->chipset_rev == SIS96x_900_REV) {
		/* request EEPROM access and poll for the grant */
		sw32(mear, EEREQ);
		for (wait = 0; wait < 2000; wait++) {
			if (sr32(mear) & EEGNT) {
				/* read the EEPROM word by word */
				for (i = 0; i < sis_priv->eeprom_size / 2; i++)
					ebuf[i] = (u16)read_eeprom(ioaddr, i);
				ret = 0;
				break;
			}
			udelay(1);
		}
		/* release the EEPROM whether or not we got the grant */
		sw32(mear, EEDONE);
	} else {
		signature = (u16)read_eeprom(ioaddr, EEPROMSignature);
		if (signature != 0xffff && signature != 0x0000) {
			/* read the EEPROM word by word */
			for (i = 0; i < sis_priv->eeprom_size / 2; i++)
				ebuf[i] = (u16)read_eeprom(ioaddr, i);
			ret = 0;
		}
	}
	return ret;
}
2183
2184 #define SIS900_EEPROM_MAGIC 0xBABE
2185 static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2186 {
2187 struct sis900_private *sis_priv = netdev_priv(dev);
2188 u8 *eebuf;
2189 int res;
2190
2191 eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL);
2192 if (!eebuf)
2193 return -ENOMEM;
2194
2195 eeprom->magic = SIS900_EEPROM_MAGIC;
2196 spin_lock_irq(&sis_priv->lock);
2197 res = sis900_read_eeprom(dev, eebuf);
2198 spin_unlock_irq(&sis_priv->lock);
2199 if (!res)
2200 memcpy(data, eebuf + eeprom->offset, eeprom->len);
2201 kfree(eebuf);
2202 return res;
2203 }
2204
/* ethtool operations exported by this driver (link settings, message
 * level, Wake-on-LAN and EEPROM access). */
static const struct ethtool_ops sis900_ethtool_ops = {
	.get_drvinfo	= sis900_get_drvinfo,
	.get_msglevel	= sis900_get_msglevel,
	.set_msglevel	= sis900_set_msglevel,
	.get_link	= sis900_get_link,
	.nway_reset	= sis900_nway_reset,
	.get_wol	= sis900_get_wol,
	.set_wol	= sis900_set_wol,
	.get_link_ksettings = sis900_get_link_ksettings,
	.set_link_ksettings = sis900_set_link_ksettings,
	.get_eeprom_len = sis900_get_eeprom_len,
	.get_eeprom = sis900_get_eeprom,
};
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
/**
 * mii_ioctl - process MII i/o control commands
 * @net_dev: the net device to command for
 * @rq: parameter passed by ioctl, carries mii_ioctl_data
 * @cmd: the ioctl command
 *
 * Standard MII ioctls: get current PHY address, read a PHY
 * register, write a PHY register. SIOCGMIIPHY deliberately falls
 * through to SIOCGMIIREG so it also returns a register read for
 * the just-reported PHY.
 */
static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(rq);

	switch(cmd) {
	case SIOCGMIIPHY:		/* get the address of the PHY in use */
		data->phy_id = sis_priv->mii->phy_addr;
		fallthrough;

	case SIOCGMIIREG:		/* read the specified MII register */
		data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:		/* write the specified MII register */
		mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
/**
 * sis900_set_config - set the media port via net_device.set_config
 * @dev: the net device whose media type is changed
 * @map: ifmap passed in by ifconfig
 *
 * Only port changes are honoured (auto, 10baseT, 100baseT[X]); all
 * other ifmap fields are ignored. Each supported change drops the
 * carrier, reprograms the PHY control register, and relies on the
 * periodic timer to re-detect the link afterwards.
 */
static int sis900_set_config(struct net_device *dev, struct ifmap *map)
{
	struct sis900_private *sis_priv = netdev_priv(dev);
	struct mii_phy *mii_phy = sis_priv->mii;

	u16 status;

	/* (u_char)(-1) means "port unchanged" */
	if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
		/* port values follow the IF_PORT_* constants from
		 * linux/netdevice.h */
		switch(map->port){
		case IF_PORT_UNKNOWN: /* treat as "auto" */
			dev->if_port = map->port;
			/* the link will be temporarily down while the mode
			 * changes; the timer routine re-senses it later */
			netif_carrier_off(dev);

			/* read the current PHY control state */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);

			/* enable auto-negotiation and restart it */
			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);

			break;

		case IF_PORT_10BASET: /* force 10baseT */
			dev->if_port = map->port;
			/* link goes down until the timer re-senses it */
			netif_carrier_off(dev);

			/* read the current PHY control state */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);

			/* disable auto-negotiation, clear the speed bit
			 * to force 10 Mbit operation */
			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, status & ~(MII_CNTL_SPEED |
					MII_CNTL_AUTO));
			break;

		case IF_PORT_100BASET: /* force 100baseT */
		case IF_PORT_100BASETX:
			dev->if_port = map->port;
			/* link goes down until the timer re-senses it */
			netif_carrier_off(dev);

			/* set the speed bit to force 100 Mbit operation
			 * (auto-negotiation bit is left as-is here) */
			status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
			mdio_write(dev, mii_phy->phy_addr,
				   MII_CONTROL, (status & ~MII_CNTL_SPEED) |
				   MII_CNTL_SPEED);

			break;

		case IF_PORT_10BASE2: /* not wired on this hardware */
		case IF_PORT_AUI:
		case IF_PORT_100BASEFX:
			return -EOPNOTSUPP;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360 static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2361 {
2362
2363 u32 crc = ether_crc(6, addr);
2364
2365
2366 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
2367 return (int)(crc >> 24);
2368 else
2369 return (int)(crc >> 25);
2370 }
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
/**
 * set_rx_mode - program the receive filter
 * @net_dev: the net device to be programmed
 *
 * Translate the interface flags and multicast list into the chip's
 * receive-filter control register and multicast hash table:
 * promiscuous mode accepts everything, ALLMULTI (or too many
 * multicast entries) accepts all multicast, otherwise each address
 * sets one bit in the hash table. Loopback mode is handled at the
 * end by toggling the Tx/Rx configuration.
 */
static void set_rx_mode(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 mc_filter[16] = {0};	/* 256-bit hash table at most */
	int i, table_entries;
	u32 rx_mode;

	/* newer chips have a 256-bit table (16 words), older 128-bit */
	if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
	   (sis_priv->chipset_rev == SIS900B_900_REV))
		table_entries = 16;
	else
		table_entries = 8;

	if (net_dev->flags & IFF_PROMISC) {
		/* accept everything, fill the hash table with ones */
		rx_mode = RFPromiscuous;
		for (i = 0; i < table_entries; i++)
			mc_filter[i] = 0xffff;
	} else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
		   (net_dev->flags & IFF_ALLMULTI)) {
		/* too many (or all) multicast: accept all multicast */
		rx_mode = RFAAB | RFAAM;
		for (i = 0; i < table_entries; i++)
			mc_filter[i] = 0xffff;
	} else {
		/* accept broadcast plus the hashed multicast list */
		struct netdev_hw_addr *ha;
		rx_mode = RFAAB;

		netdev_for_each_mc_addr(ha, net_dev) {
			unsigned int bit_nr;

			bit_nr = sis900_mcast_bitnr(ha->addr,
						    sis_priv->chipset_rev);
			mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
		}
	}

	/* write the hash table: select each filter word via rfcr,
	 * then write its 16 bits through rfdr */
	for (i = 0; i < table_entries; i++) {
		sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
		sw32(rfdr, mc_filter[i]);
	}
	/* finally enable the receive filter with the chosen mode */
	sw32(rfcr, RFEN | rx_mode);

	/* internal loopback: stop the Tx/Rx machines, flip the
	 * loopback bits, then restore the control register */
	if (net_dev->flags & IFF_LOOPBACK) {
		u32 cr_saved;

		cr_saved = sr32(cr);
		sw32(cr, cr_saved | TxDIS | RxDIS);

		sw32(txcfg, sr32(txcfg) | TxMLB);
		sw32(rxcfg, sr32(rxcfg) | RxATX);

		sw32(cr, cr_saved);
	}
}
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
/**
 * sis900_reset - reset the sis900 MAC
 * @net_dev: the net device to reset
 *
 * Mask interrupts, clear the receive filter, issue a full chip
 * reset, and wait for both the Tx and Rx reset-complete bits before
 * restoring the base configuration (PESEL, plus RND_CNT on newer
 * revisions).
 */
static void sis900_reset(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 status = TxRCMP | RxRCMP;
	int i;

	sw32(ier, 0);
	sw32(imr, 0);
	sw32(rfcr, 0);

	sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));

	/* Wait for both resets to complete: each iteration clears from
	 * `status` whichever completion bits appeared in the ISR, so the
	 * loop ends when both have been seen (or after 1000 polls). */
	for (i = 0; status && (i < 1000); i++)
		status ^= sr32(isr) & status;

	if (sis_priv->chipset_rev >= SIS635A_900_REV ||
	    sis_priv->chipset_rev == SIS900B_900_REV)
		sw32(cfg, PESEL | RND_CNT);
	else
		sw32(cfg, PESEL);
}
2479
2480
2481
2482
2483
2484
2485
2486
2487 static void sis900_remove(struct pci_dev *pci_dev)
2488 {
2489 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2490 struct sis900_private *sis_priv = netdev_priv(net_dev);
2491
2492 unregister_netdev(net_dev);
2493
2494 while (sis_priv->first_mii) {
2495 struct mii_phy *phy = sis_priv->first_mii;
2496
2497 sis_priv->first_mii = phy->next;
2498 kfree(phy);
2499 }
2500
2501 dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
2502 sis_priv->rx_ring_dma);
2503 dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
2504 sis_priv->tx_ring_dma);
2505 pci_iounmap(pci_dev, sis_priv->ioaddr);
2506 free_netdev(net_dev);
2507 }
2508
2509 static int __maybe_unused sis900_suspend(struct device *dev)
2510 {
2511 struct net_device *net_dev = dev_get_drvdata(dev);
2512 struct sis900_private *sis_priv = netdev_priv(net_dev);
2513 void __iomem *ioaddr = sis_priv->ioaddr;
2514
2515 if(!netif_running(net_dev))
2516 return 0;
2517
2518 netif_stop_queue(net_dev);
2519 netif_device_detach(net_dev);
2520
2521
2522 sw32(cr, RxDIS | TxDIS | sr32(cr));
2523
2524 return 0;
2525 }
2526
/**
 * sis900_resume - device PM resume hook
 * @dev: the generic device being resumed
 *
 * When the interface is up, rebuild the receive filter and the
 * descriptor rings, restore the receive mode, reattach the device,
 * re-enable the receiver and interrupts, and re-run media detection.
 */
static int __maybe_unused sis900_resume(struct device *dev)
{
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;

	if(!netif_running(net_dev))
		return 0;

	sis900_init_rxfilter(net_dev);

	sis900_init_tx_ring(net_dev);
	sis900_init_rx_ring(net_dev);

	set_rx_mode(net_dev);

	netif_device_attach(net_dev);
	netif_start_queue(net_dev);

	/* start with 10 Mbps half duplex until the link is re-sensed
	 * by sis900_check_mode below / the periodic timer */
	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);

	/* re-enable all serviced interrupts, the receiver, and IE */
	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
	sw32(cr, RxENA | sr32(cr));
	sw32(ier, IE);

	sis900_check_mode(net_dev, sis_priv->mii);

	return 0;
}
2558
/* Bundle the suspend/resume callbacks into a dev_pm_ops structure
 * (compiled out when power management is disabled). */
static SIMPLE_DEV_PM_OPS(sis900_pm_ops, sis900_suspend, sis900_resume);

/* PCI driver glue: supported device table, probe/remove entry points
 * and power-management operations. */
static struct pci_driver sis900_pci_driver = {
	.name		= SIS900_MODULE_NAME,
	.id_table	= sis900_pci_tbl,
	.probe		= sis900_probe,
	.remove		= sis900_remove,
	.driver.pm	= &sis900_pm_ops,
};
2568
/* Module init: print the version banner (modular builds only) and
 * register the PCI driver with the core. */
static int __init sis900_init_module(void)
{
/* when built as a module, the banner is printed whether or not any
 * device is actually found during probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&sis900_pci_driver);
}
2578
/* Module exit: unregister the PCI driver; the core then invokes
 * sis900_remove() for each bound device. */
static void __exit sis900_cleanup_module(void)
{
	pci_unregister_driver(&sis900_pci_driver);
}

module_init(sis900_init_module);
module_exit(sis900_cleanup_module);