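/*
 * tlan.c -- driver for TI ThunderLAN based Ethernet adapters
 *
 * Supports Compaq Netelligent/NetFlex-3 (PCI and EISA) and
 * Olicom OC-2183/2185, OC-2325 and OC-2326 boards.
 */
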
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>

#include "tlan.h"

static struct net_device *tlan_eisa_devices;

static int tlan_devices_installed;

static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex,
		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");

MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");

static const char tlan_signature[] = "TLAN";
static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;

static const char * const media[] = {
	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
	"100BaseTx-FD", "100BaseT4", NULL
};

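/* feature flags and EEPROM MAC-address offset for each supported board,
 * indexed by the driver_data field of tlan_pci_tbl; entries 13 and 14
 * are the EISA NetFlex-3/E variants selected in tlan_probe1()
 */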
static struct board {
	const char	*device_label;
	u32		flags;
	u16		addr_ofs;
} board_info[] = {
	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10/100 TX PCI UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/P",
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent Integrated 10/100 TX UTP",
	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq Netelligent 10/100 TX Embedded UTP",
	  TLAN_ADAPTER_NONE, 0x83 },
	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
	{ "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
	{ "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
	{ "Compaq NetFlex-3/E",
	  TLAN_ADAPTER_ACTIVITY_LED |
	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
	{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
};

static const struct pci_device_id tlan_pci_tbl[] = {
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);

static void tlan_eisa_probe(void);
static void tlan_eisa_cleanup(void);
static int tlan_init(struct net_device *);
static int tlan_open(struct net_device *dev);
static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
static irqreturn_t tlan_handle_interrupt(int, void *);
static int tlan_close(struct net_device *);
static struct net_device_stats *tlan_get_stats(struct net_device *);
static void tlan_set_multicast_list(struct net_device *);
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
		       int irq, int rev, const struct pci_device_id *ent);
static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tlan_tx_timeout_work(struct work_struct *work);
static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent);

static u32 tlan_handle_tx_eof(struct net_device *, u16);
static u32 tlan_handle_stat_overflow(struct net_device *, u16);
static u32 tlan_handle_rx_eof(struct net_device *, u16);
static u32 tlan_handle_dummy(struct net_device *, u16);
static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);

static void tlan_timer(struct timer_list *t);
static void tlan_phy_monitor(struct timer_list *t);

static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
static void tlan_print_dio(u16);
static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, const char *mac);

static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);

static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);

static void tlan_ee_send_start(u16);
static int tlan_ee_send_byte(u16, u8, int);
static void tlan_ee_receive_byte(u16, u8 *, int);
static int tlan_ee_read_byte(struct net_device *, u8, u8 *);

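/*
 * The skb pointer for each RX/TX frame is stashed in the two unused
 * buffer slots (8 and 9) of its list entry.  The pointer is split into
 * two 32-bit halves so it survives on 64-bit hosts; the double 16-bit
 * shift in tlan_get_skb() avoids an undefined 32-bit shift when
 * unsigned long is only 32 bits wide.
 */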
static inline void
tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static inline struct sk_buff *
tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr;

	addr = tag->buffer[9].address;
	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *) addr;
}

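/* interrupt dispatch table, indexed by the type field of TLAN_HOST_INT;
 * slot 0 is unused
 */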
static u32
(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};

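/*
 * tlan_set_timer -- (re)arm the driver's one-shot timer.
 *
 * A timer that is already pending is left alone unless it is an
 * ACTIVITY timer, which may be replaced by the new event.
 */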
static void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}

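/*
 * tlan_remove_one -- PCI removal callback; unregisters the netdev and
 * releases DMA memory, I/O regions and pending work.
 */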
static void tlan_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tlan_priv *priv = netdev_priv(dev);

	unregister_netdev(dev);

	if (priv->dma_storage) {
		dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
				  priv->dma_storage, priv->dma_storage_dma);
	}

#ifdef CONFIG_PCI
	pci_release_regions(pdev);
#endif

	cancel_work_sync(&priv->tlan_tqueue);
	free_netdev(dev);
}

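/*
 * tlan_start/tlan_stop -- bring the adapter in and out of a running
 * state; used by open/close and by the suspend/resume handlers.
 */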
static void tlan_start(struct net_device *dev)
{
	tlan_reset_lists(dev);

	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_wake_queue(dev);
}

static void tlan_stop(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);

	del_timer_sync(&priv->media_timer);
	tlan_read_and_clear_stats(dev, TLAN_RECORD);
	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

	tlan_reset_adapter(dev);
	if (priv->timer.function != NULL) {
		del_timer_sync(&priv->timer);
		priv->timer.function = NULL;
	}
}

static int __maybe_unused tlan_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (netif_running(dev))
		tlan_stop(dev);

	netif_device_detach(dev);

	return 0;
}

static int __maybe_unused tlan_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	netif_device_attach(dev);

	if (netif_running(dev))
		tlan_start(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume);

static struct pci_driver tlan_driver = {
	.name		= "tlan",
	.id_table	= tlan_pci_tbl,
	.probe		= tlan_init_one,
	.remove		= tlan_remove_one,
	.driver.pm	= &tlan_pm_ops,
};

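/*
 * tlan_probe -- module entry point; registers the PCI driver and then
 * scans the EISA bus for additional adapters.
 */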
static int __init tlan_probe(void)
{
	int rc = -ENODEV;

	pr_info("%s", tlan_banner);

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");

	rc = pci_register_driver(&tlan_driver);
	if (rc != 0) {
		pr_err("Could not register pci driver\n");
		goto err_out_pci_free;
	}

	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
	tlan_eisa_probe();

	pr_info("%d device%s installed, PCI: %d EISA: %d\n",
		tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
		tlan_have_pci, tlan_have_eisa);

	if (tlan_devices_installed == 0) {
		rc = -ENODEV;
		goto err_out_pci_unreg;
	}
	return 0;

err_out_pci_unreg:
	pci_unregister_driver(&tlan_driver);
err_out_pci_free:
	return rc;
}

static int tlan_init_one(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return tlan_probe1(pdev, -1, -1, 0, ent);
}

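/*
 * tlan_probe1 -- set up one adapter.
 *
 * Called from tlan_init_one() with a PCI device, or from
 * tlan_eisa_probe() with pdev == NULL and an explicit I/O address and
 * IRQ.  Allocates the netdev, picks the matching board_info entry,
 * applies the aui/speed/duplex options and registers the device.
 */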
static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
		       const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tlan_priv *priv;
	u16 device_id;
	int reg, rc = -ENODEV;

#ifdef CONFIG_PCI
	if (pdev) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;

		rc = pci_request_regions(pdev, tlan_signature);
		if (rc) {
			pr_err("Could not reserve IO regions\n");
			goto err_out;
		}
	}
#endif

	dev = alloc_etherdev(sizeof(struct tlan_priv));
	if (dev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}
	if (pdev)
		SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);

	priv->pci_dev = pdev;
	priv->dev = dev;

	if (pdev) {
		u32 pci_io_base = 0;

		priv->adapter = &board_info[ent->driver_data];

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("No suitable PCI mapping available\n");
			goto err_out_free_dev;
		}

		for (reg = 0; reg <= 5; reg++) {
			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
				pci_io_base = pci_resource_start(pdev, reg);
				TLAN_DBG(TLAN_DEBUG_GNRL,
					 "IO mapping is available at %x.\n",
					 pci_io_base);
				break;
			}
		}
		if (!pci_io_base) {
			pr_err("No IO mappings available\n");
			rc = -EIO;
			goto err_out_free_dev;
		}

		dev->base_addr = pci_io_base;
		dev->irq = pdev->irq;
		priv->adapter_rev = pdev->revision;
		pci_set_master(pdev);
		pci_set_drvdata(pdev, dev);
	} else {
		/* EISA card: tell the 20F1 and 40F1 variants apart */
		device_id = inw(ioaddr + EISA_ID2);
		if (device_id == 0x20F1) {
			priv->adapter = &board_info[13];
			priv->adapter_rev = 23;
		} else {
			priv->adapter = &board_info[14];
			priv->adapter_rev = 10;
		}
		dev->base_addr = ioaddr;
		dev->irq = irq;
	}

	if (dev->mem_start) {
		priv->aui = dev->mem_start & 0x01;
		priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
			: (dev->mem_start & 0x06) >> 1;
		priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
			: (dev->mem_start & 0x18) >> 3;

		if (priv->speed == 0x1)
			priv->speed = TLAN_SPEED_10;
		else if (priv->speed == 0x2)
			priv->speed = TLAN_SPEED_100;

		debug = priv->debug = dev->mem_end;
	} else {
		priv->aui = aui[boards_found];
		priv->speed = speed[boards_found];
		priv->duplex = duplex[boards_found];
		priv->debug = debug;
	}

	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);

	spin_lock_init(&priv->lock);

	rc = tlan_init(dev);
	if (rc) {
		pr_err("Could not set up device\n");
		goto err_out_free_dev;
	}

	rc = register_netdev(dev);
	if (rc) {
		pr_err("Could not register device\n");
		goto err_out_uninit;
	}

	tlan_devices_installed++;
	boards_found++;

	if (pdev)
		tlan_have_pci++;
	else {
		priv->next_device = tlan_eisa_devices;
		tlan_eisa_devices = dev;
		tlan_have_eisa++;
	}

	netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
		    (int)dev->irq,
		    (int)dev->base_addr,
		    priv->adapter->device_label,
		    priv->adapter_rev);
	return 0;

err_out_uninit:
	dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
			  priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_regions:
#ifdef CONFIG_PCI
	if (pdev)
		pci_release_regions(pdev);
err_out:
#endif
	if (pdev)
		pci_disable_device(pdev);
	return rc;
}

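/*
 * tlan_eisa_cleanup -- tear down all EISA adapters found at probe time.
 */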
static void tlan_eisa_cleanup(void)
{
	struct net_device *dev;
	struct tlan_priv *priv;

	while (tlan_have_eisa) {
		dev = tlan_eisa_devices;
		priv = netdev_priv(dev);
		if (priv->dma_storage) {
			dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
					  priv->dma_storage,
					  priv->dma_storage_dma);
		}
		release_region(dev->base_addr, 0x10);
		unregister_netdev(dev);
		tlan_eisa_devices = priv->next_device;
		free_netdev(dev);
		tlan_have_eisa--;
	}
}

static void __exit tlan_exit(void)
{
	pci_unregister_driver(&tlan_driver);

	if (tlan_have_eisa)
		tlan_eisa_cleanup();
}

module_init(tlan_probe);
module_exit(tlan_exit);

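/*
 * tlan_eisa_probe -- walk EISA slot I/O space (0x1000-0x8000 in steps
 * of 0x1000) looking for the ThunderLAN signature, read the IRQ
 * routing from the slot and hand every enabled board to tlan_probe1().
 */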
static void __init tlan_eisa_probe(void)
{
	long ioaddr;
	int irq;
	u16 device_id;

	if (!EISA_bus) {
		TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
		return;
	}

	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {

		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));

		TLAN_DBG(TLAN_DEBUG_PROBE,
			 "Probing for EISA adapter at IO: 0x%4x : ",
			 (int) ioaddr);
		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
			goto out;

		if (inw(ioaddr + EISA_ID) != 0x110E) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		device_id = inw(ioaddr + EISA_ID2);
		if (device_id != 0x20F1 && device_id != 0x40F1) {
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* check if the adapter is enabled */
		if (inb(ioaddr + EISA_CR) != 0x1) {
			release_region(ioaddr, 0x10);
			goto out2;
		}

		if (debug == 0x10)
			pr_info("Found one\n");

		/* get the IRQ from the board */
		switch (inb(ioaddr + 0xcc0)) {
		case(0x10):
			irq = 5;
			break;
		case(0x20):
			irq = 9;
			break;
		case(0x40):
			irq = 10;
			break;
		case(0x80):
			irq = 11;
			break;
		default:
			release_region(ioaddr, 0x10);
			goto out;
		}

		/* set up the device */
		tlan_probe1(NULL, ioaddr, irq, 12, NULL);
		continue;

out:
		if (debug == 0x10)
			pr_info("None found\n");
		continue;

out2:
		if (debug == 0x10)
			pr_info("Card found but it is not enabled, skipping\n");
		continue;
	}
}

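/* netpoll entry point: run the interrupt handler with the IRQ masked */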
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tlan_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	tlan_handle_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static const struct net_device_ops tlan_netdev_ops = {
	.ndo_open		= tlan_open,
	.ndo_stop		= tlan_close,
	.ndo_start_xmit		= tlan_start_tx,
	.ndo_tx_timeout		= tlan_tx_timeout,
	.ndo_get_stats		= tlan_get_stats,
	.ndo_set_rx_mode	= tlan_set_multicast_list,
	.ndo_eth_ioctl		= tlan_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tlan_poll,
#endif
};

static void tlan_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct tlan_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	if (priv->pci_dev)
		strlcpy(info->bus_info, pci_name(priv->pci_dev),
			sizeof(info->bus_info));
	else
		strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
}

static int tlan_get_eeprom_len(struct net_device *dev)
{
	return TLAN_EEPROM_SIZE;
}

static int tlan_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	int i;

	for (i = 0; i < TLAN_EEPROM_SIZE; i++)
		if (tlan_ee_read_byte(dev, i, &data[i]))
			return -EIO;

	return 0;
}

static const struct ethtool_ops tlan_ethtool_ops = {
	.get_drvinfo	= tlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= tlan_get_eeprom_len,
	.get_eeprom	= tlan_get_eeprom,
};

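/*
 * tlan_init -- allocate the shared RX/TX list area, read the MAC
 * address from the EEPROM and hook up the netdev/ethtool operations.
 */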
static int tlan_init(struct net_device *dev)
{
	int dma_size;
	int err;
	int i;
	struct tlan_priv *priv;
	u8 addr[ETH_ALEN];

	priv = netdev_priv(dev);

	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
		* (sizeof(struct tlan_list));
	priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
					       &priv->dma_storage_dma,
					       GFP_KERNEL);
	priv->dma_size = dma_size;

	if (priv->dma_storage == NULL) {
		pr_err("Could not allocate lists and buffers for %s\n",
		       dev->name);
		return -ENOMEM;
	}
	priv->rx_list = (struct tlan_list *)
		ALIGN((unsigned long)priv->dma_storage, 8);
	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
	priv->tx_list_dma =
		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;

	err = 0;
	for (i = 0; i < ETH_ALEN; i++)
		err |= tlan_ee_read_byte(dev,
					 (u8) priv->adapter->addr_ofs + i,
					 addr + i);
	if (err) {
		pr_err("%s: Error reading MAC from eeprom: %d\n",
		       dev->name, err);
	}
	/* boards with the 0xf8 offset (Olicom) store the MAC byte-swapped */
	if (priv->adapter->addr_ofs == 0xf8) {
		for (i = 0; i < ETH_ALEN; i += 2) {
			char tmp = addr[i];
			addr[i] = addr[i + 1];
			addr[i + 1] = tmp;
		}
	}
	eth_hw_addr_set(dev, addr);

	netif_carrier_off(dev);

	dev->netdev_ops = &tlan_netdev_ops;
	dev->ethtool_ops = &tlan_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return 0;
}

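/*
 * tlan_open -- ndo_open; grabs the (shared) IRQ, sets up the timers
 * and starts the adapter.
 */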
static int tlan_open(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int err;

	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
			  dev->name, dev);

	if (err) {
		netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
			   dev->irq);
		return err;
	}

	timer_setup(&priv->timer, NULL, 0);
	timer_setup(&priv->media_timer, tlan_phy_monitor, 0);

	tlan_start(dev);

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
		 dev->name, priv->tlan_rev);

	return 0;
}

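/*
 * tlan_ioctl -- ndo_eth_ioctl; implements the SIOCGMIIPHY, SIOCGMIIREG
 * and SIOCSMIIREG MII pass-through ioctls.
 */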
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	u32 phy = priv->phy[priv->phy_num];

	if (!priv->phy_online)
		return -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:	/* get address of MII PHY in use */
		data->phy_id = phy;
		fallthrough;

	case SIOCGMIIREG:	/* read MII PHY register */
		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
				  data->reg_num & 0x1f, &data->val_out);
		return 0;

	case SIOCSMIIREG:	/* write MII PHY register */
		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

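/*
 * tlan_tx_timeout -- watchdog handler; drops everything queued and
 * restarts the adapter from scratch.
 */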
static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);

	tlan_free_lists(dev);
	tlan_reset_lists(dev);
	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
	tlan_reset_adapter(dev);
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

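/*
 * tlan_tx_timeout_work -- deferred transmit-timeout handling, scheduled
 * from the adapter-check interrupt path.
 */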
static void tlan_tx_timeout_work(struct work_struct *work)
{
	struct tlan_priv *priv =
		container_of(work, struct tlan_priv, tlan_tqueue);

	tlan_tx_timeout(priv->dev, UINT_MAX);
}

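/*
 * tlan_start_tx -- ndo_start_xmit.
 *
 * Frames shorter than TLAN_MIN_FRAME_SIZE are padded.  The skb is
 * DMA-mapped and hung on the TX ring; if the channel is idle a GO
 * command starts it, otherwise the new list is chained onto its
 * predecessor's forward pointer.
 */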
static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t tail_list_phys;
	struct tlan_list *tail_list;
	unsigned long flags;
	unsigned int txlen;

	if (!priv->phy_online) {
		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
			 dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
		return NETDEV_TX_OK;
	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);

	tail_list = priv->tx_list + priv->tx_tail;
	tail_list_phys =
		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;

	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
			 dev->name, priv->tx_head, priv->tx_tail);
		netif_stop_queue(dev);
		priv->tx_busy_count++;
		return NETDEV_TX_BUSY;
	}

	tail_list->forward = 0;

	tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
						      skb->data, txlen,
						      DMA_TO_DEVICE);
	tlan_store_skb(tail_list, skb);

	tail_list->frame_size = (u16) txlen;
	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
	tail_list->buffer[1].count = 0;
	tail_list->buffer[1].address = 0;

	spin_lock_irqsave(&priv->lock, flags);
	tail_list->c_stat = TLAN_CSTAT_READY;
	if (!priv->tx_in_progress) {
		priv->tx_in_progress = 1;
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Starting TX on buffer %d\n",
			 priv->tx_tail);
		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
	} else {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: Adding buffer %d to TX channel\n",
			 priv->tx_tail);
		if (priv->tx_tail == 0) {
			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
				= tail_list_phys;
		} else {
			(priv->tx_list + (priv->tx_tail - 1))->forward
				= tail_list_phys;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);

	return NETDEV_TX_OK;
}

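/*
 * tlan_handle_interrupt -- IRQ handler; decodes the interrupt type
 * from TLAN_HOST_INT, dispatches through tlan_int_vector[] and ACKs
 * the interrupt with whatever count the sub-handler returns.
 */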
static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tlan_priv *priv = netdev_priv(dev);
	u16 host_int;
	u16 type;

	spin_lock(&priv->lock);

	host_int = inw(dev->base_addr + TLAN_HOST_INT);
	type = (host_int & TLAN_HI_IT_MASK) >> 2;
	if (type) {
		u32 ack;
		u32 host_cmd;

		outw(host_int, dev->base_addr + TLAN_HOST_INT);
		ack = tlan_int_vector[type](dev, host_int);

		if (ack) {
			host_cmd = TLAN_HC_ACK | ack | (type << 18);
			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
		}
	}

	spin_unlock(&priv->lock);

	return IRQ_RETVAL(type);
}

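/*
 * tlan_close -- ndo_stop; stops the adapter and frees the IRQ and the
 * RX/TX lists.
 */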
static int tlan_close(struct net_device *dev)
{
	tlan_stop(dev);

	free_irq(dev->irq, dev);
	tlan_free_lists(dev);
	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);

	return 0;
}

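/*
 * tlan_get_stats -- ndo_get_stats; folds the adapter's hardware
 * counters into dev->stats and optionally dumps debug state.
 */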
static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;

	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
		 priv->rx_eoc_count);
	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
		 priv->tx_busy_count);
	if (debug & TLAN_DEBUG_GNRL) {
		tlan_print_dio(dev->base_addr);
		tlan_phy_print(dev);
	}
	if (debug & TLAN_DEBUG_LIST) {
		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
			tlan_print_list(priv->rx_list + i, "RX", i);
		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
			tlan_print_list(priv->tx_list + i, "TX", i);
	}

	return &dev->stats;
}

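/*
 * tlan_set_multicast_list -- ndo_set_rx_mode.
 *
 * Promiscuous mode sets the CAF bit; otherwise the first three
 * multicast addresses go into the spare AREG slots and the rest are
 * folded into the two 32-bit hash registers.
 */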
static void tlan_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 hash1 = 0;
	u32 hash2 = 0;
	int i;
	u32 offset;
	u8 tmp;

	if (dev->flags & IFF_PROMISC) {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
	} else {
		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
		tlan_dio_write8(dev->base_addr,
				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
					 0xffffffff);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
					 0xffffffff);
		} else {
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				if (i < 3) {
					tlan_set_mac(dev, i + 1,
						     (char *) &ha->addr);
				} else {
					offset =
						tlan_hash_func((u8 *)&ha->addr);
					if (offset < 32)
						hash1 |= (1 << offset);
					else
						hash2 |= (1 << (offset - 32));
				}
				i++;
			}
			for ( ; i < 3; i++)
				tlan_set_mac(dev, i + 1, NULL);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
		}
	}
}

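/*
 * tlan_handle_tx_eof -- reclaim completed TX lists, restart the TX
 * channel on EOC and blink the activity LED; returns the number of
 * frames to ACK.
 */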
static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int eoc = 0;
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 0;
	u16 tmp_c_stat;

	TLAN_DBG(TLAN_DEBUG_TX,
		 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
		 priv->tx_head, priv->tx_tail);
	head_list = priv->tx_list + priv->tx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		struct sk_buff *skb = tlan_get_skb(head_list);

		ack++;
		dma_unmap_single(&priv->pci_dev->dev,
				 head_list->buffer[0].address,
				 max(skb->len,
				     (unsigned int)TLAN_MIN_FRAME_SIZE),
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		head_list->buffer[8].address = 0;
		head_list->buffer[9].address = 0;

		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		dev->stats.tx_bytes += head_list->frame_size;

		head_list->c_stat = TLAN_CSTAT_UNUSED;
		netif_start_queue(dev);
		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
		head_list = priv->tx_list + priv->tx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted TX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

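/* a statistics counter is about to overflow: fold all counters into
 * dev->stats before any value is lost
 */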
static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return 1;
}

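/*
 * tlan_handle_rx_eof -- pass completed RX frames up the stack.
 *
 * Each finished list gets a freshly mapped replacement skb before it
 * is appended back onto the RX ring; if the allocation fails the old
 * buffer is reused and the frame is dropped.
 */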
static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack = 0;
	int eoc = 0;
	struct tlan_list *head_list;
	struct sk_buff *skb;
	struct tlan_list *tail_list;
	u16 tmp_c_stat;
	dma_addr_t head_list_phys;

	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
		 priv->rx_head, priv->rx_tail);
	head_list = priv->rx_list + priv->rx_head;
	head_list_phys =
		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;

	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
	       && (ack < 255)) {
		dma_addr_t frame_dma = head_list->buffer[0].address;
		u32 frame_size = head_list->frame_size;
		struct sk_buff *new_skb;

		ack++;
		if (tmp_c_stat & TLAN_CSTAT_EOC)
			eoc = 1;

		new_skb = netdev_alloc_skb_ip_align(dev,
						    TLAN_MAX_FRAME_SIZE + 5);
		if (!new_skb)
			goto drop_and_reuse;

		skb = tlan_get_skb(head_list);
		dma_unmap_single(&priv->pci_dev->dev, frame_dma,
				 TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
		skb_put(skb, frame_size);

		dev->stats.rx_bytes += frame_size;

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		head_list->buffer[0].address =
			dma_map_single(&priv->pci_dev->dev, new_skb->data,
				       TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		tlan_store_skb(head_list, new_skb);
drop_and_reuse:
		head_list->forward = 0;
		head_list->c_stat = 0;
		tail_list = priv->rx_list + priv->rx_tail;
		tail_list->forward = head_list_phys;

		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
	}

	if (!ack)
		netdev_info(dev,
			    "Received interrupt for uncompleted RX frame\n");

	if (eoc) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
			 priv->rx_head, priv->rx_tail);
		head_list = priv->rx_list + priv->rx_head;
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
		tlan_dio_write8(dev->base_addr,
				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
		if (priv->timer.function == NULL) {
			priv->timer.function = tlan_timer;
			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
			priv->timer_set_at = jiffies;
			priv->timer_type = TLAN_TIMER_ACTIVITY;
			add_timer(&priv->timer);
		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
			priv->timer_set_at = jiffies;
		}
	}

	return ack;
}

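/* triggered only by a deliberate TLAN_HC_REQ_INT self-test interrupt
 * (see tlan_finish_reset())
 */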
static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
	netdev_info(dev, "Test interrupt\n");
	return 1;
}

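/*
 * tlan_handle_tx_eoc -- TX end-of-channel; on pre-0x30 silicon the
 * channel must be restarted by hand if more lists are ready.
 */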
static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	struct tlan_list *head_list;
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_TX,
			 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
			 priv->tx_head, priv->tx_tail);
		head_list = priv->tx_list + priv->tx_head;
		head_list_phys = priv->tx_list_dma
			+ sizeof(struct tlan_list)*priv->tx_head;
		if ((head_list->c_stat & TLAN_CSTAT_READY)
		    == TLAN_CSTAT_READY) {
			netif_stop_queue(dev);
			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
			ack |= TLAN_HC_GO;
		} else {
			priv->tx_in_progress = 0;
		}
	}

	return ack;
}

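/*
 * tlan_handle_status_check -- adapter check / network status interrupt.
 * An adapter check resets the chip via the timeout workqueue; a PHY
 * interrupt on the internal PHY fixes up polarity swap.
 */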
static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u32 ack;
	u32 error;
	u8 net_sts;
	u32 phy;
	u16 tlphy_ctl;
	u16 tlphy_sts;

	ack = 1;
	if (host_int & TLAN_HI_IV_MASK) {
		netif_stop_queue(dev);
		error = inl(dev->base_addr + TLAN_CH_PARM);
		netdev_info(dev, "Adaptor Error = 0x%x\n", error);
		tlan_read_and_clear_stats(dev, TLAN_RECORD);
		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);

		schedule_work(&priv->tlan_tqueue);

		netif_wake_queue(dev);
		ack = 0;
	} else {
		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
		phy = priv->phy[priv->phy_num];

		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
		if (net_sts) {
			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
			TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
				 dev->name, (unsigned) net_sts);
		}
		if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
			__tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS,
					    &tlphy_sts);
			__tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL,
					    &tlphy_ctl);
			if (!(tlphy_sts & TLAN_TS_POLOK) &&
			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl |= TLAN_TC_SWAPOL;
				__tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						     tlphy_ctl);
			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
				tlphy_ctl &= ~TLAN_TC_SWAPOL;
				__tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
						     tlphy_ctl);
			}

			if (debug)
				__tlan_phy_print(dev);
		}
	}

	return ack;
}

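/*
 * tlan_handle_rx_eoc -- RX end-of-channel; pre-0x30 silicon needs the
 * RX channel restarted at the current head of the ring.
 */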
static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
	struct tlan_priv *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
	u32 ack = 1;

	if (priv->tlan_rev < 0x30) {
		TLAN_DBG(TLAN_DEBUG_RX,
			 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
			 priv->rx_head, priv->rx_tail);
		head_list_phys = priv->rx_list_dma
			+ sizeof(struct tlan_list)*priv->rx_head;
		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
		ack |= TLAN_HC_GO | TLAN_HC_RT;
		priv->rx_eoc_count++;
	}

	return ack;
}

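/*
 * tlan_timer -- dispatcher for the one-shot driver timer armed by
 * tlan_set_timer(); the ACTIVITY case dims the activity LED once the
 * link has been idle for TLAN_TIMER_ACT_DELAY jiffies.
 */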
static void tlan_timer(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, timer);
	struct net_device *dev = priv->dev;
	u32 elapsed;
	unsigned long flags = 0;

	priv->timer.function = NULL;

	switch (priv->timer_type) {
	case TLAN_TIMER_PHY_PDOWN:
		tlan_phy_power_down(dev);
		break;
	case TLAN_TIMER_PHY_PUP:
		tlan_phy_power_up(dev);
		break;
	case TLAN_TIMER_PHY_RESET:
		tlan_phy_reset(dev);
		break;
	case TLAN_TIMER_PHY_START_LINK:
		tlan_phy_start_link(dev);
		break;
	case TLAN_TIMER_PHY_FINISH_AN:
		tlan_phy_finish_auto_neg(dev);
		break;
	case TLAN_TIMER_FINISH_RESET:
		tlan_finish_reset(dev);
		break;
	case TLAN_TIMER_ACTIVITY:
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->timer.function == NULL) {
			elapsed = jiffies - priv->timer_set_at;
			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
				tlan_dio_write8(dev->base_addr,
						TLAN_LED_REG, TLAN_LED_LINK);
			} else {
				priv->timer.expires = priv->timer_set_at
					+ TLAN_TIMER_ACT_DELAY;
				spin_unlock_irqrestore(&priv->lock, flags);
				add_timer(&priv->timer);
				break;
			}
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		break;
	default:
		break;
	}
}

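/*
 * tlan_reset_lists -- initialize the TX ring to UNUSED and refill the
 * RX ring with freshly allocated, DMA-mapped skbs.
 */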
static void tlan_reset_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	dma_addr_t list_phys;
	struct sk_buff *skb;

	priv->tx_head = 0;
	priv->tx_tail = 0;
	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		list->c_stat = TLAN_CSTAT_UNUSED;
		list->buffer[0].address = 0;
		list->buffer[2].count = 0;
		list->buffer[2].address = 0;
		list->buffer[8].address = 0;
		list->buffer[9].address = 0;
	}

	priv->rx_head = 0;
	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
		list->c_stat = TLAN_CSTAT_READY;
		list->frame_size = TLAN_MAX_FRAME_SIZE;
		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
		if (!skb)
			break;

		list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
							 skb->data,
							 TLAN_MAX_FRAME_SIZE,
							 DMA_FROM_DEVICE);
		tlan_store_skb(list, skb);
		list->buffer[1].count = 0;
		list->buffer[1].address = 0;
		list->forward = list_phys + sizeof(struct tlan_list);
	}

	/* in case allocation failed early, clear the remaining slots */
	while (i < TLAN_NUM_RX_LISTS) {
		tlan_store_skb(priv->rx_list + i, NULL);
		++i;
	}
	list->forward = 0;
}

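/* unmap and free every skb still held by the TX and RX rings */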
static void tlan_free_lists(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	struct tlan_list *list;
	struct sk_buff *skb;

	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
		list = priv->tx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			dma_unmap_single(&priv->pci_dev->dev,
					 list->buffer[0].address,
					 max(skb->len,
					     (unsigned int)TLAN_MIN_FRAME_SIZE),
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}

	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
		list = priv->rx_list + i;
		skb = tlan_get_skb(list);
		if (skb) {
			dma_unmap_single(&priv->pci_dev->dev,
					 list->buffer[0].address,
					 TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			list->buffer[8].address = 0;
			list->buffer[9].address = 0;
		}
	}
}

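/* debug helper: dump the chip's internal (DIO) register space */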
static void tlan_print_dio(u16 io_base)
{
	u32 data0, data1;
	int i;

	pr_info("Contents of internal registers for io base 0x%04hx\n",
		io_base);
	pr_info("Off. +0 +4\n");
	for (i = 0; i < 0x4C; i += 8) {
		data0 = tlan_dio_read32(io_base, i);
		data1 = tlan_dio_read32(io_base, i + 0x4);
		pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
	}
}

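/* debug helper: dump one RX/TX list entry */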
static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
	int i;

	pr_info("%s List %d at %p\n", type, num, list);
	pr_info(" Forward = 0x%08x\n", list->forward);
	pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
	pr_info(" Frame Size = 0x%04hx\n", list->frame_size);

	for (i = 0; i < 2; i++) {
		pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
			i, list->buffer[i].count, list->buffer[i].address);
	}
}

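/*
 * tlan_read_and_clear_stats -- read the chip's statistics registers
 * (reading clears them) and, if record is TLAN_RECORD, accumulate the
 * values into dev->stats.
 */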
static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
	u32 tx_good, tx_under;
	u32 rx_good, rx_over;
	u32 def_tx, crc, code;
	u32 multi_col, single_col;
	u32 excess_col, late_col, loss;

	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
	rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
	rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
	def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	code = inb(dev->base_addr + TLAN_DIO_DATA + 3);

	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
	multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
	single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;

	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
	late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
	loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);

	if (record) {
		dev->stats.rx_packets += rx_good;
		dev->stats.rx_errors += rx_over + crc + code;
		dev->stats.tx_packets += tx_good;
		dev->stats.tx_errors += tx_under + loss;
		dev->stats.collisions += multi_col
			+ single_col + excess_col + late_col;

		dev->stats.rx_over_errors += rx_over;
		dev->stats.rx_crc_errors += crc;
		dev->stats.rx_frame_errors += code;

		dev->stats.tx_aborted_errors += tx_under;
		dev->stats.tx_carrier_errors += loss;
	}
}

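/*
 * tlan_reset_adapter -- full chip reset: soft reset, interrupts off,
 * address/hash registers cleared, burst/threshold setup, MII unreset,
 * PHY detection and media-dependent NetConfig programming.
 */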
static void
tlan_reset_adapter(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i;
	u32 addr;
	u32 data;
	u8 data8;

	priv->tlan_full_duplex = false;
	priv->phy_online = 0;
	netif_carrier_off(dev);

	/* 1. Assert the adapter reset bit. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_AD_RST;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	udelay(1000);

	/* 2. Turn off interrupts. */
	data = inl(dev->base_addr + TLAN_HOST_CMD);
	data |= TLAN_HC_INT_OFF;
	outl(data, dev->base_addr + TLAN_HOST_CMD);

	/* 3. Clear the address and hash registers. */
	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
		tlan_dio_write32(dev->base_addr, (u16) i, 0);

	/* 4. Set up the NetConfig register. */
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	/* 5. Load the load-timer and load-threshold values. */
	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);

	/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);

	/* 7. Set up the remaining registers. */
	if (priv->tlan_rev >= 0x30) {
		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
	}
	tlan_phy_detect(dev);
	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;

	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
		data |= TLAN_NET_CFG_BIT;
		if (priv->aui == 1) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
			priv->tlan_full_duplex = true;
		} else {
			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
		}
	}

	/* enable the internal PHY when it is the one in use */
	if (priv->phy_num == 0 ||
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
		data |= TLAN_NET_CFG_PHY_EN;
	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
		tlan_finish_reset(dev);
	else
		tlan_phy_power_down(dev);
}

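/*
 * tlan_finish_reset -- second half of the reset sequence; waits for
 * link (retrying every 10 s), reports the negotiated mode and starts
 * the RX channel.
 */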
static void
tlan_finish_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u8 data;
	u32 phy;
	u8 sio;
	u16 status;
	u16 partner;
	u16 tlphy_ctl;
	u16 tlphy_par;
	u16 tlphy_id1, tlphy_id2;
	int i;

	phy = priv->phy[priv->phy_num];

	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
	if (priv->tlan_full_duplex)
		data |= TLAN_NET_CMD_DUPLEX;
	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
	if (priv->phy_num == 0)
		data |= TLAN_NET_MASK_MASK7;
	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);

	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
	    (priv->aui)) {
		status = MII_GS_LINK;
		netdev_info(dev, "Link forced\n");
	} else {
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		udelay(1000);
		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
		if (status & MII_GS_LINK) {
			/* link details are only reported for Nat. Semi PHYs */
			if ((tlphy_id1 == NAT_SEM_ID1) &&
			    (tlphy_id2 == NAT_SEM_ID2)) {
				tlan_mii_read_reg(dev, phy, MII_AN_LPA,
						  &partner);
				tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
						  &tlphy_par);

				netdev_info(dev,
					    "Link active, %s %uMbps %s-Duplex\n",
					    !(tlphy_par & TLAN_PHY_AN_EN_STAT)
					    ? "forced" : "Autonegotiation enabled,",
					    tlphy_par & TLAN_PHY_SPEED_100
					    ? 100 : 10,
					    tlphy_par & TLAN_PHY_DUPLEX_FULL
					    ? "Full" : "Half");

				if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
					netdev_info(dev, "Partner capability:");
					for (i = 5; i < 10; i++)
						if (partner & (1 << i))
							pr_cont(" %s",
								media[i-5]);
					pr_cont("\n");
				}
			} else
				netdev_info(dev, "Link active\n");
			/* start the link-beat monitor */
			priv->media_timer.expires = jiffies + HZ;
			add_timer(&priv->media_timer);
		}
	}

	if (priv->phy_num == 0) {
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
		tlphy_ctl |= TLAN_TC_INTEN;
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
		sio |= TLAN_NET_SIO_MINTEN;
		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
	}

	if (status & MII_GS_LINK) {
		tlan_set_mac(dev, 0, dev->dev_addr);
		priv->phy_online = 1;
		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
			outb((TLAN_HC_REQ_INT >> 8),
			     dev->base_addr + TLAN_HOST_CMD + 1);
		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		netif_carrier_on(dev);
	} else {
		netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
		return;
	}
	tlan_set_multicast_list(dev);
}

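/*
 * tlan_set_mac -- write a MAC address (or zeros when mac is NULL) into
 * adapter address register areg.
 */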
static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
{
	int i;

	areg *= 6;

	if (mac != NULL) {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, mac[i]);
	} else {
		for (i = 0; i < 6; i++)
			tlan_dio_write8(dev->base_addr,
					TLAN_AREG_0 + areg + i, 0);
	}
}

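/* debug helpers: dump the first 0x20 MII registers of the active PHY;
 * the __-prefixed variant expects priv->lock to be held
 */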
static void __tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;

	lockdep_assert_held(&priv->lock);

	phy = priv->phy[priv->phy_num];

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		netdev_info(dev, "Unmanaged PHY\n");
	} else if (phy <= TLAN_PHY_MAX_ADDR) {
		netdev_info(dev, "PHY 0x%02x\n", phy);
		pr_info(" Off. +0 +1 +2 +3\n");
		for (i = 0; i < 0x20; i += 4) {
			__tlan_mii_read_reg(dev, phy, i, &data0);
			__tlan_mii_read_reg(dev, phy, i + 1, &data1);
			__tlan_mii_read_reg(dev, phy, i + 2, &data2);
			__tlan_mii_read_reg(dev, phy, i + 3, &data3);
			pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
				i, data0, data1, data2, data3);
		}
	} else {
		netdev_info(dev, "Invalid PHY\n");
	}
}

static void tlan_phy_print(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	__tlan_phy_print(dev);
	spin_unlock_irqrestore(&priv->lock, flags);
}

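/*
 * tlan_phy_detect -- probe MII addresses for the internal PHY (always
 * at TLAN_PHY_MAX_ADDR) and the first external PHY; prefers the
 * external one when both are present.
 */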
static void tlan_phy_detect(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 control;
	u16 hi;
	u16 lo;
	u32 phy;

	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
		priv->phy_num = 0xffff;
		return;
	}

	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);

	if (hi != 0xffff)
		priv->phy[0] = TLAN_PHY_MAX_ADDR;
	else
		priv->phy[0] = TLAN_PHY_NONE;

	priv->phy[1] = TLAN_PHY_NONE;
	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
		if ((control != 0xffff) ||
		    (hi != 0xffff) || (lo != 0xffff)) {
			TLAN_DBG(TLAN_DEBUG_GNRL,
				 "PHY found at %02x %04x %04x %04x\n",
				 phy, control, hi, lo);
			if ((priv->phy[1] == TLAN_PHY_NONE) &&
			    (phy != TLAN_PHY_MAX_ADDR)) {
				priv->phy[1] = phy;
			}
		}
	}

	if (priv->phy[1] != TLAN_PHY_NONE)
		priv->phy_num = 1;
	else if (priv->phy[0] != TLAN_PHY_NONE)
		priv->phy_num = 0;
	else
		netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
}

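/*
 * tlan_phy_power_down -- power down/isolate the PHY(s), then schedule
 * power-up after 50 ms.
 */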
static void tlan_phy_power_down(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
	tlan_mii_sync(dev->base_addr);
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
		/* only isolate (don't power down) the built-in 10BaseT
		 * PHY on boards that use it
		 */
		if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
			value = MII_GC_ISOLATE;
		tlan_mii_sync(dev->base_addr);
		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
	}

	/* wait 50 ms and then power the PHY back up */
	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
}

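/* power the PHY back up and give it 500 ms before the reset step */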
static void tlan_phy_power_up(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 value;

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK;
	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
	tlan_mii_sync(dev->base_addr);

	/* wait 500 ms and then reset the transceiver; this gives
	 * it ample time to come out of power-down
	 */
	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);

}
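
/***************************************************************
 *	tlan_phy_reset
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	A pointer to the device structure of the
 *			adapter whose PHY is to be reset.
 *
 *	Sets the reset bit on the active PHY and polls until the
 *	self-clearing bit drops (or one second elapses), then
 *	schedules the link start step 50 ms later.
 *
 **************************************************************/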
static void tlan_phy_reset(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy;
	u16 value;
	unsigned long timeout = jiffies + HZ;

	phy = priv->phy[priv->phy_num];

	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
	tlan_mii_sync(dev->base_addr);
	value = MII_GC_LOOPBK | MII_GC_RESET;
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
	do {
		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
		if (time_after(jiffies, timeout)) {
			netdev_err(dev, "PHY reset timeout\n");
			return;
		}
	} while (value & MII_GC_RESET);

	/* wait 50 ms and then start the link negotiation */
	tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);

}
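
/***************************************************************
 *	tlan_phy_start_link
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	A pointer to the device structure of the
 *			adapter whose link is being started.
 *
 *	Either forces the speed/duplex requested via the module
 *	parameters or starts autonegotiation.  AUI operation is
 *	routed to the internal PHY.  The next step of the bring-up
 *	is always scheduled with tlan_set_timer().
 *
 **************************************************************/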
static void tlan_phy_start_link(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 ability;
	u16 control;
	u16 data;
	u16 phy;
	u16 status;
	u16 tctl;

	phy = priv->phy[priv->phy_num];
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);

	if ((status & MII_GS_AUTONEG) &&
	    (!priv->aui)) {
		/* the capability bits live in the top of the status word */
		ability = status >> 11;
		if (priv->speed == TLAN_SPEED_10 &&
		    priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
		} else if (priv->speed == TLAN_SPEED_10 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_HALF) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
		} else if (priv->speed == TLAN_SPEED_100 &&
			   priv->duplex == TLAN_DUPLEX_FULL) {
			priv->tlan_full_duplex = true;
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
		} else {

			/* advertise everything the PHY supports */
			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
					   (ability << 5) | 1);
			/* enable autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
			/* restart autonegotiation */
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);

			/* give the PHY two seconds to negotiate,
			 * then check the result
			 */
			netdev_info(dev, "Starting autonegotiation\n");
			tlan_set_timer(dev, (2 * HZ), TLAN_TIMER_PHY_FINISH_AN);
			return;
		}

	}

	if ((priv->aui) && (priv->phy_num != 0)) {
		/* the AUI port sits on the internal PHY; switch to it */
		priv->phy_num = 0;
		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
			| TLAN_NET_CFG_PHY_EN;
		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
		tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
		return;
	} else if (priv->phy_num == 0) {
		control = 0;
		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
		if (priv->aui) {
			tctl |= TLAN_TC_AUISEL;
		} else {
			tctl &= ~TLAN_TC_AUISEL;
			if (priv->duplex == TLAN_DUPLEX_FULL) {
				control |= MII_GC_DUPLEX;
				priv->tlan_full_duplex = true;
			}
			if (priv->speed == TLAN_SPEED_100)
				control |= MII_GC_SPEEDSEL;
		}
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
	}

	/* wait four seconds for the link to settle, then finish the reset */
	tlan_set_timer(dev, (4 * HZ), TLAN_TIMER_FINISH_RESET);

}
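
/***************************************************************
 *	tlan_phy_finish_auto_neg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	A pointer to the device structure of the
 *			adapter finishing autonegotiation.
 *
 *	Checks whether autonegotiation has completed, derives the
 *	common mode from the advertisement and link partner
 *	registers, and falls back to the internal 10 Mbit PHY when
 *	appropriate before finishing the reset sequence.
 *
 **************************************************************/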
static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 an_adv;
	u16 an_lpa;
	u16 mode;
	u16 phy;
	u16 status;

	phy = priv->phy[priv->phy_num];

	/* some status bits are latched; read twice to get the
	 * current state
	 */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	udelay(1000);
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);

	if (!(status & MII_GS_AUTOCMPLT)) {
		/* not done yet; check again in two seconds */
		tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
		return;
	}

	netdev_info(dev, "Autonegotiation complete\n");
	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
	/* intersect our advertisement with the link partner's */
	mode = an_adv & an_lpa & 0x03E0;
	if (mode & 0x0100)
		priv->tlan_full_duplex = true;
	else if (!(mode & 0x0080) && (mode & 0x0040))
		priv->tlan_full_duplex = true;

	/* if no 100baseTX medium was negotiated and the adapter uses
	 * the internal PHY for 10 Mbit operation, switch over to it
	 */
	if ((!(mode & 0x0180)) &&
	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
	    (priv->phy_num != 0)) {
		priv->phy_num = 0;
		tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
		return;
	}

	if (priv->phy_num == 0) {
		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
		    (an_adv & an_lpa & 0x0040)) {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB | MII_GC_DUPLEX);
			netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
		} else {
			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
					   MII_GC_AUTOENB);
			netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
		}
	}

	/* wait 100 ms and then finish the reset */
	tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);

}
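
/***************************************************************
 *	tlan_phy_monitor
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		t	A pointer to the media_timer embedded in
 *			the adapter's private data.
 *
 *	Timer callback that polls the PHY link status once a
 *	second, updates the carrier state and link LED, and falls
 *	back to the external PHY when the internal 10 Mbit link
 *	drops.  Rearms itself before returning.
 *
 **************************************************************/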
static void tlan_phy_monitor(struct timer_list *t)
{
	struct tlan_priv *priv = from_timer(priv, t, media_timer);
	struct net_device *dev = priv->dev;
	u16 phy;
	u16 phy_status;

	phy = priv->phy[priv->phy_num];

	/* get PHY status register */
	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);

	/* check if link has been lost */
	if (!(phy_status & MII_GS_LINK)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_DEBUG "TLAN: %s has lost link\n",
			       dev->name);
			tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
			netif_carrier_off(dev);
			if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
				/* power down the internal PHY */
				u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
					   MII_GC_ISOLATE;

				tlan_mii_sync(dev->base_addr);
				tlan_mii_write_reg(dev, priv->phy[0],
						   MII_GEN_CTL, data);
				/* switch to the external PHY */
				priv->phy_num = 1;
				/* restart autonegotiation */
				tlan_set_timer(dev, msecs_to_jiffies(400),
					       TLAN_TIMER_PHY_PDOWN);
				return;
			}
		}
	}

	/* link reestablished? */
	if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
		tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
		       dev->name);
		netif_carrier_on(dev);
	}
	priv->media_timer.expires = jiffies + HZ;
	add_timer(&priv->media_timer);
}
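
/***************************************************************
 *	__tlan_mii_read_reg
 *
 *	Returns:
 *		true if the PHY did not acknowledge the read,
 *		false on success.
 *	Parms:
 *		dev	The device structure containing the PHY.
 *		phy	The address of the PHY to be queried.
 *		reg	The register whose contents are to be
 *			retrieved.
 *		val	A pointer to a variable to store the
 *			retrieved value.
 *
 *	Bit-bangs a read cycle on the MII management interface
 *	through the TLAN NetSio register.  The caller must hold
 *	priv->lock.
 *
 **************************************************************/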
static bool
__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
	u8 nack;
	u16 sio, tmp;
	u32 i;
	bool err;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);

	lockdep_assert_held(&priv->lock);

	err = false;
	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read (10b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* turnaround: release the bus */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle clock cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);

	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* sample the ack bit */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish the ack cycle */
	if (nack) {	/* no ack: clock out a dummy read and fail */
		for (i = 0; i < 16; i++) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
		tmp = 0xffff;
		err = true;
	} else {	/* ack received: clock in the data, MSB first */
		for (tmp = 0, i = 0x8000; i; i >>= 1) {
			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
				tmp |= i;
			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		}
	}

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

	*val = tmp;

	return err;
}

static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg,
			      u16 *val)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	__tlan_mii_read_reg(dev, phy, reg, val);
	spin_unlock_irqrestore(&priv->lock, flags);
}
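
/***************************************************************
 *	tlan_mii_send_data
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *		data		The value to be clocked out.
 *		num_bits	The number of bits of data to send,
 *				MSB first.
 *
 *	Clocks the low num_bits bits of data out on the MII
 *	management interface with the transmitter enabled.
 *
 **************************************************************/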
static void tlan_mii_send_data(u16 base_port, u32 data, unsigned int num_bits)
{
	u16 sio;
	u32 i;

	if (num_bits == 0)
		return;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);

	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
		if (data & i)
			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
	}

}
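
/***************************************************************
 *	tlan_mii_sync
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		base_port	The base IO port of the adapter.
 *
 *	Resynchronizes the MII management interface by clocking
 *	32 cycles with the transmitter disabled, as required
 *	before each MII frame.
 *
 **************************************************************/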
static void tlan_mii_sync(u16 base_port)
{
	int i;
	u16 sio;

	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
	for (i = 0; i < 32; i++) {
		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
	}

}
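
/***************************************************************
 *	__tlan_mii_write_reg
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		dev	The device structure containing the PHY.
 *		phy	The address of the PHY to be written to.
 *		reg	The register whose contents are to be
 *			written.
 *		val	The value to be written to the register.
 *
 *	Bit-bangs a write cycle on the MII management interface
 *	through the TLAN NetSio register.  The caller must hold
 *	priv->lock.
 *
 **************************************************************/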
static void
__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	u16 sio;
	int minten;
	struct tlan_priv *priv = netdev_priv(dev);

	lockdep_assert_held(&priv->lock);

	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_mii_sync(dev->base_addr);

	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
	if (minten)
		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);

	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device # */
	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register # */

	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* turnaround (10b) */
	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */

	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);

	if (minten)
		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);

}

static void
tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	__tlan_mii_write_reg(dev, phy, reg, val);
	spin_unlock_irqrestore(&priv->lock, flags);
}
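
/***************************************************************
 *	tlan_ee_send_start
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM.
 *
 *	Sends a start cycle to the serial EEPROM: data is pulled
 *	low while the clock is high, as on an I2C-style bus.
 *
 **************************************************************/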
static void tlan_ee_send_start(u16 io_base)
{
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
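
/***************************************************************
 *	tlan_ee_send_byte
 *
 *	Returns:
 *		If the correct ack was received, 0; otherwise 1.
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM.
 *		data		The 8 bits of information to send.
 *		stop		If TLAN_EEPROM_STOP is passed, a
 *				stop cycle is sent after the byte
 *				is acknowledged.
 *
 *	Clocks the byte out MSB first, then samples the
 *	acknowledge bit from the EEPROM.
 *
 **************************************************************/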
static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
	int err;
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;

	/* assume clock is low, tx is enabled */
	for (place = 0x80; place != 0; place >>= 1) {
		if (place & data)
			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
		else
			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);	/* read the ack bit */
	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);

	if ((!err) && stop) {
		/* STOP: raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

	return err;

}
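
/***************************************************************
 *	tlan_ee_receive_byte
 *
 *	Returns:
 *		Nothing
 *	Parms:
 *		io_base		The IO port base address for the
 *				TLAN device with the EEPROM.
 *		data		An address to a char to hold the
 *				byte read from the EEPROM.
 *		stop		If TLAN_EEPROM_STOP is passed, no
 *				ack is sent and a stop cycle ends
 *				the transfer.
 *
 *	Clocks a byte in MSB first, then either acknowledges it
 *	(to continue reading) or sends a stop cycle.
 *
 **************************************************************/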
static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
	u8 place;
	u16 sio;

	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
	*data = 0;

	/* assume clock is low, tx is enabled */
	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
	for (place = 0x80; place; place >>= 1) {
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
			*data |= place;
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	}

	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
	if (!stop) {
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);	/* ack = 0 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
	} else {
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);		/* no ack = 1 */
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
		/* STOP: raise data while clock is high */
		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
	}

}
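
/***************************************************************
 *	tlan_ee_read_byte
 *
 *	Returns:
 *		No error = 0, otherwise the stage (1-3) at which
 *		the EEPROM failed to acknowledge.
 *	Parms:
 *		dev		The device structure containing the
 *				EEPROM to read from.
 *		ee_addr		The address of the byte in the EEPROM.
 *		data		An address to a char to hold the
 *				byte read from the EEPROM.
 *
 *	Performs a random read: a start cycle, the write select
 *	(0xa0) and word address, then a repeated start and the
 *	read select (0xa1) before the byte is clocked in.
 *
 **************************************************************/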
static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
	int err;
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
	if (err) {
		ret = 1;
		goto fail;
	}
	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
	if (err) {
		ret = 2;
		goto fail;
	}
	tlan_ee_send_start(dev->base_addr);
	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
	if (err) {
		ret = 3;
		goto fail;
	}
	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

}