/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon I/II based adapters such as the 3Com
 *           3C985 and the NetGear GA620/GA620T.
 *
 * Written by Jes Sorensen <jes@trained-monkey.org>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif

#include <net/sock.h>
#include <net/ip.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>


#define DRV_NAME		"acenic"

#undef INDEX_DEBUG

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif

#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif

#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif

static const struct pci_device_id acenic_pci_tbl[] = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon PN9000SX boards identify themselves with the DEC
	 * vendor ID, hence the entry below.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);

#define ace_sync_irq(irq)	synchronize_irq(irq)

#ifndef offset_in_page
#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
#endif

#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1

#include "acenic.h"

/*
 * Upper bounds on the firmware image sections (text, read-only data
 * and data).
 */
#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif
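/*
 * Number of buffers kept in each RX producer ring and the thresholds
 * at which they are refilled.  Dropping below a *_PANIC_* level
 * triggers an immediate refill from the interrupt handler, while the
 * higher *_LOW_* levels defer the refill to the tasklet.
 */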
#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
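
/*
 * Buffer sizes for the three receive rings.  The std and jumbo
 * buffers cover the respective MTU plus the Ethernet header and four
 * extra bytes (presumably to leave room for a VLAN tag); the mini
 * ring uses small fixed-size buffers.
 */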
#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	ACE_MINI_SIZE
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 4)
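
/*
 * Default interrupt coalescing settings: how many clock ticks to
 * wait and how many descriptors to accumulate before raising an
 * interrupt.  More conservative values are installed once jumbo
 * frames are enabled - see ace_set_rxtx_parms().
 */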
#define DEF_TX_COAL		400
#define DEF_TX_MAX_DESC		60
#define DEF_RX_COAL		120
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21

#if tigon2FwReleaseLocal < 20001118
#define TX_COAL_INTS_ONLY	1
#else
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)

static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");

module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");


static const char version[] =
  "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
  " http://home.cern.ch/~jes/gige/acenic.html\n";

static int ace_get_link_ksettings(struct net_device *,
				  struct ethtool_link_ksettings *);
static int ace_set_link_ksettings(struct net_device *,
				  const struct ethtool_link_ksettings *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);

static const struct ethtool_ops ace_ethtool_ops = {
	.get_drvinfo = ace_get_drvinfo,
	.get_link_ksettings = ace_get_link_ksettings,
	.set_link_ksettings = ace_set_link_ksettings,
};

static void ace_watchdog(struct net_device *dev, unsigned int txqueue);

static const struct net_device_ops ace_netdev_ops = {
	.ndo_open		= ace_open,
	.ndo_stop		= ace_close,
	.ndo_tx_timeout		= ace_watchdog,
	.ndo_get_stats		= ace_get_stats,
	.ndo_start_xmit		= ace_start_xmit,
	.ndo_set_rx_mode	= ace_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ace_set_mac_addr,
	.ndo_change_mtu		= ace_change_mtu,
};

static int acenic_probe_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct net_device *dev;
	struct ace_private *ap;
	static int boards_found;

	dev = alloc_etherdev(sizeof(struct ace_private));
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ap = netdev_priv(dev);
	ap->ndev = dev;
	ap->pdev = pdev;
	ap->name = pci_name(pdev);

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	dev->watchdog_timeo = 5*HZ;
	dev->min_mtu = 0;
	dev->max_mtu = ACE_JUMBO_MTU;

	dev->netdev_ops = &ace_netdev_ops;
	dev->ethtool_ops = &ace_ethtool_ops;

	/* display version info once, when the first board is found */
	if (!boards_found)
		printk(version);

	if (pci_enable_device(pdev))
		goto fail_free_netdev;

	/*
	 * Enable bus mastering before touching pci_command below,
	 * since pci_set_master() modifies it.
	 */
	pci_set_master(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

	/* Memory mapped access is not always enabled by the firmware */
	if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
		printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
		       "access - was not enabled by BIOS/Firmware\n",
		       ap->name);
		ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
		pci_write_config_word(ap->pdev, PCI_COMMAND,
				      ap->pci_command);
		wmb();
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
	if (ap->pci_latency <= 0x40) {
		ap->pci_latency = 0x40;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
	}

	/*
	 * Remap the register block; dev->base_addr holds the bus
	 * address of BAR 0.
	 */
	dev->base_addr = pci_resource_start(pdev, 0);
	ap->regs = ioremap(dev->base_addr, 0x4000);
	if (!ap->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "AceNIC %i will be disabled.\n",
		       ap->name, boards_found);
		goto fail_free_netdev;
	}

	switch(pdev->vendor) {
	case PCI_VENDOR_ID_ALTEON:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
			printk(KERN_INFO "%s: Farallon PN9100-T ",
			       ap->name);
		} else {
			printk(KERN_INFO "%s: Alteon AceNIC ",
			       ap->name);
		}
		break;
	case PCI_VENDOR_ID_3COM:
		printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
		break;
	case PCI_VENDOR_ID_NETGEAR:
		printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
		break;
	case PCI_VENDOR_ID_DEC:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
			printk(KERN_INFO "%s: Farallon PN9000-SX ",
			       ap->name);
			break;
		}
		fallthrough;
	case PCI_VENDOR_ID_SGI:
		printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
		break;
	default:
		printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
		break;
	}

	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
	printk("irq %d\n", pdev->irq);

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
		printk(KERN_ERR "%s: Driver compiled without Tigon I"
		       " support - NIC disabled\n", dev->name);
		goto fail_uninit;
	}
#endif

	if (ace_allocate_descriptors(dev))
		goto fail_free_netdev;

#ifdef MODULE
	if (boards_found >= ACE_MAX_MOD_PARMS)
		ap->board_idx = BOARD_IDX_OVERFLOW;
	else
		ap->board_idx = boards_found;
#else
	ap->board_idx = BOARD_IDX_STATIC;
#endif

	if (ace_init(dev))
		goto fail_free_netdev;

	if (register_netdev(dev)) {
		printk(KERN_ERR "acenic: device registration failed\n");
		goto fail_uninit;
	}
	ap->name = dev->name;

	dev->features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pdev, dev);

	boards_found++;
	return 0;

fail_uninit:
	ace_init_cleanup(dev);
fail_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

static void acenic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i;

	unregister_netdev(dev);

	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	if (ap->version >= 2)
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);

	/*
	 * This clears any pending interrupts
	 */
	writel(1, &regs->Mb0Lo);
	readl(&regs->CpuCtrl);	/* flush */

	/*
	 * Make sure no other CPUs are processing interrupts on the
	 * card before the buffers are released below.
	 */
	ace_sync_irq(dev->irq);

	for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;

		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_std_skbuff[i];
			mapping = dma_unmap_addr(ringp, mapping);
			dma_unmap_page(&ap->pdev->dev, mapping,
				       ACE_STD_BUFSIZE, DMA_FROM_DEVICE);

			ap->rx_std_ring[i].size = 0;
			ap->skb->rx_std_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	if (ap->version >= 2) {
		for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;

			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_mini_skbuff[i];
				mapping = dma_unmap_addr(ringp, mapping);
				dma_unmap_page(&ap->pdev->dev, mapping,
					       ACE_MINI_BUFSIZE,
					       DMA_FROM_DEVICE);

				ap->rx_mini_ring[i].size = 0;
				ap->skb->rx_mini_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}
	}

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_jumbo_skbuff[i];
			mapping = dma_unmap_addr(ringp, mapping);
			dma_unmap_page(&ap->pdev->dev, mapping,
				       ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);

			ap->rx_jumbo_ring[i].size = 0;
			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	ace_init_cleanup(dev);
	free_netdev(dev);
}

static struct pci_driver acenic_pci_driver = {
	.name		= "acenic",
	.id_table	= acenic_pci_tbl,
	.probe		= acenic_probe_one,
	.remove		= acenic_remove_one,
};

static void ace_free_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	if (ap->rx_std_ring != NULL) {
		size = (sizeof(struct rx_desc) *
			(RX_STD_RING_ENTRIES +
			 RX_JUMBO_RING_ENTRIES +
			 RX_MINI_RING_ENTRIES +
			 RX_RETURN_RING_ENTRIES));
		dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring,
				  ap->rx_ring_base_dma);
		ap->rx_std_ring = NULL;
		ap->rx_jumbo_ring = NULL;
		ap->rx_mini_ring = NULL;
		ap->rx_return_ring = NULL;
	}
	if (ap->evt_ring != NULL) {
		size = (sizeof(struct event) * EVT_RING_ENTRIES);
		dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring,
				  ap->evt_ring_dma);
		ap->evt_ring = NULL;
	}
	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
		dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring,
				  ap->tx_ring_dma);
	}
	ap->tx_ring = NULL;

	if (ap->evt_prd != NULL) {
		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
				  (void *)ap->evt_prd, ap->evt_prd_dma);
		ap->evt_prd = NULL;
	}
	if (ap->rx_ret_prd != NULL) {
		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
				  (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
		ap->rx_ret_prd = NULL;
	}
	if (ap->tx_csm != NULL) {
		dma_free_coherent(&ap->pdev->dev, sizeof(u32),
				  (void *)ap->tx_csm, ap->tx_csm_dma);
		ap->tx_csm = NULL;
	}
}


static int ace_allocate_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	size = (sizeof(struct rx_desc) *
		(RX_STD_RING_ENTRIES +
		 RX_JUMBO_RING_ENTRIES +
		 RX_MINI_RING_ENTRIES +
		 RX_RETURN_RING_ENTRIES));

	ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size,
					     &ap->rx_ring_base_dma, GFP_KERNEL);
	if (ap->rx_std_ring == NULL)
		goto fail;

	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

	size = (sizeof(struct event) * EVT_RING_ENTRIES);

	ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size,
					  &ap->evt_ring_dma, GFP_KERNEL);

	if (ap->evt_ring == NULL)
		goto fail;

	/*
	 * A host-resident TX ring is only used on the Tigon II; the
	 * Tigon I keeps its TX ring in NIC memory (see ace_init()).
	 */
	if (!ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);

		ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size,
						 &ap->tx_ring_dma, GFP_KERNEL);

		if (ap->tx_ring == NULL)
			goto fail;
	}

	ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
					 &ap->evt_prd_dma, GFP_KERNEL);
	if (ap->evt_prd == NULL)
		goto fail;

	ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
					    &ap->rx_ret_prd_dma, GFP_KERNEL);
	if (ap->rx_ret_prd == NULL)
		goto fail;

	ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
					&ap->tx_csm_dma, GFP_KERNEL);
	if (ap->tx_csm == NULL)
		goto fail;

	return 0;

fail:
	/* Clean up what we have allocated so far */
	ace_init_cleanup(dev);
	return 1;
}


/*
 * Generic cleanup handling data allocated during init.  Used when
 * the module is unloaded or if an error occurs during initialization.
 */
static void ace_init_cleanup(struct net_device *dev)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	ace_free_descriptors(dev);

	if (ap->info)
		dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info),
				  ap->info, ap->info_dma);
	kfree(ap->skb);
	kfree(ap->trace_buf);

	if (dev->irq)
		free_irq(dev->irq, dev);

	iounmap(ap->regs);
}
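
/*
 * Commands are posted to the NIC by writing them into the command
 * ring, which lives in NIC register space (regs->CmdRng), and then
 * advancing the command producer index.  The firmware picks the
 * entries up asynchronously.
 */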
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}
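
/*
 * One-shot per-NIC bring-up: reset the chip, identify the Tigon
 * revision, read the MAC address from the EEPROM, tune the PCI
 * configuration, describe all rings in the shared info block and
 * finally start the firmware.
 */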
static int ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs __iomem *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;
	u8 addr[ETH_ALEN];

	ap = netdev_priv(dev);
	regs = ap->regs;

	board_idx = ap->board_idx;
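
	/*
	 * First, issue a full hardware reset.  The reset value is
	 * written to both byte lanes (HW_RESET | HW_RESET << 24),
	 * presumably so that it takes effect regardless of the word
	 * swapping mode the chip is currently in.
	 */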
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* PCI write posting flush */
	udelay(5);

	/*
	 * Select the word-swapping mode and clear interrupts; don't
	 * access any other registers before this point.
	 */
#ifdef __BIG_ENDIAN
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting flush */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting flush */
	writel(0, &regs->Mb0Lo);

	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
	case 5:
		printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting flush */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size.
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING " Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Set the mode of operation for the DMA engine and the
	 * buffer descriptors.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting flush */

	/* Read the MAC address, one byte at a time, from the EEPROM */
	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	addr[0] = (mac1 >> 8) & 0xff;
	addr[1] = mac1 & 0xff;
	addr[2] = (mac2 >> 24) & 0xff;
	addr[3] = (mac2 >> 16) & 0xff;
	addr[4] = (mac2 >> 8) & 0xff;
	addr[5] = mac2 & 0xff;
	eth_hw_addr_set(dev, addr);

	printk("MAC: %pM\n", dev->dev_addr);

	/*
	 * The PCI cache line size is not always set correctly by the
	 * BIOS/firmware, so check it here and correct it if possible.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO " PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the maximum DMA transfer sizes.  When PCI memory write
	 * and invalidate is enabled, DMA writes have to be limited to
	 * the cache line size; otherwise it is disabled below.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));

		/* Tuning parameters are only supported for the first
		 * ACE_MAX_MOD_PARMS cards. */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO " Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO " PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO " Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * Cap DMA bursts at 64 bytes on sparc; larger bursts are
	 * reported to upset the UltraSPARC PCI controller.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * Fast back-to-back really belongs to the host bridge, and
	 * enabling it here has reportedly caused problems for some
	 * users, hence this block is disabled.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif
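
	/*
	 * Configure DMA attributes.  The probe fails if the platform
	 * cannot provide a 64-bit DMA mask.
	 */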
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Initialize the generic info block and the command and event
	 * rings, plus the control blocks for the transmit and receive
	 * rings, as they need to be set up once and for all.
	 */
	if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info),
					&ap->info_dma, GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
			    DRV_NAME, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       DRV_NAME, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	ecode = ace_load_firmware(dev);
	if (ecode)
		goto init_error;

	ap->fw_running = 0;

	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
	info->rx_std_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
		RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
			RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		/* Tigon I: the TX ring lives in NIC memory behind the
		 * register window and has to be cleared via MMIO. */
		ap->tx_ring = (__force struct tx_desc *) regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
				 * sizeof(struct tx_desc)) / sizeof(u32); i++)
			writel(0, (__force void __iomem *)ap->tx_ring + i * 4);

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
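
	/*
	 * Potential tuning knob: the DMA read/write thresholds,
	 * in 64-bit words.
	 */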
#if 0
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * Enabling AssistState this early reportedly upsets some
	 * systems; it is done further below instead.
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       ap->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override link default parameters
	 */
	if ((board_idx >= 0) && link_state[board_idx]) {
		int option = link_state[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       ap->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", ap->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", ap->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       ap->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	writel(ap->firmware_start, &regs->Pc);

	writel(0, &regs->Mb0Lo);

	/*
	 * Initialize the ring indices before interrupts can fire, so
	 * the interrupt handler never sees stale TX state.
	 */
	ap->cur_rx = 0;
	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;

	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Enable the DMA assist engine only now; doing it earlier
	 * reportedly causes problems on some machines.
	 */
	writel(1, &regs->AssistState);

	/*
	 * Start the NIC CPU
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
	readl(&regs->CpuCtrl);

	/*
	 * Wait for the firmware to come up.
	 */
	myjif = jiffies + 3 * HZ;
	while (time_before(jiffies, myjif) && !ap->fw_running)
		cpu_relax();

	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/*
		 * A misbehaving NIC has been seen to keep raising
		 * interrupts in this state; halt the second CPU on
		 * Tigon II boards and clear mailbox 0 to quiesce it.
		 */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);

		ecode = -EBUSY;
		goto init_error;
	}

	/*
	 * We load the ring here as there seem to be no way to tell the
	 * firmware to wipe the ring without re-initializing it.
	 */
	if (!test_and_set_bit(0, &ap->std_refill_busy))
		ace_load_std_rx_ring(dev, RX_RING_SIZE);
	else
		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
		       ap->name);
	if (ap->version >= 2) {
		if (!test_and_set_bit(0, &ap->mini_refill_busy))
			ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
		else
			printk(KERN_ERR "%s: Someone is busy refilling "
			       "the RX mini ring\n", ap->name);
	}
	return 0;

init_error:
	ace_init_cleanup(dev);
	return ecode;
}

static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	int board_idx = ap->board_idx;

	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
}

static void ace_watchdog(struct net_device *data, unsigned int txqueue)
{
	struct net_device *dev = data;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	/*
	 * If the NIC still owns descriptors (tx_csm differs from
	 * tx_ret_csm) the transmitter is merely stuck, possibly due
	 * to flow control; otherwise it appears to have died.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
	} else {
		printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
		       dev->name);
#if 0
		netif_wake_queue(dev);
#endif
	}
}

static void ace_tasklet(struct tasklet_struct *t)
{
	struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
	struct net_device *dev = ap->ndev;
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}

/*
 * Copy the contents of the NIC's trace buffer to kernel memory;
 * currently stubbed out.
 */
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
#endif
}
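/*
 * Load buffers into the standard receive ring.  Each buffer is
 * allocated with netdev_alloc_skb_ip_align(), mapped for DMA and
 * published to the NIC by advancing the ring's producer index -
 * via a command on the Tigon I, via a mailbox register on the
 * Tigon II.
 */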
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_rx_bufs);

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
		if (!skb)
			break;

		mapping = dma_map_page(&ap->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

out:
	clear_bit(0, &ap->std_refill_busy);
	return;

error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}

static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;
	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
		if (!skb)
			break;

		mapping = dma_map_page(&ap->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_MINI_BUFSIZE, DMA_FROM_DEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

out:
	clear_bit(0, &ap->mini_refill_busy);
	return;
error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}

/*
 * Load the jumbo rx ring, this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
		if (!skb)
			break;

		mapping = dma_map_page(&ap->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;
error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}
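/*
 * Walk the event ring from the current consumer index up to the
 * producer index, handle each firmware event and return the new
 * consumer index.
 */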
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       ap->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       ap->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       ap->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", ap->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", ap->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       ap->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", ap->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       ap->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       ap->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       ap->name);
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       ap->name, ap->evt_ring[evtcsm].evt);
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}

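/*
 * Pull completed buffers off the RX return ring between the given
 * consumer and producer indices, unmap them and hand the packets to
 * the stack.
 */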
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = netdev_priv(dev);
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;

		/* make sure the rx descriptor isn't read before rxretprd */
		if (idx == rxretcsm)
			rmb();

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set.
			 *
			 * Mini and normal frames arrive frequently,
			 * so use local counters to avoid doing an
			 * atomic operation for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE;
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE;
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE;
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		skb = rip->skb;
		rip->skb = NULL;
		dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
			       mapsize, DMA_FROM_DEVICE);
		skb_put(skb, retdesc->size);

		csum = retdesc->tcp_udp_csum;

		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * The NIC supplies the TCP/UDP checksum without the
		 * pseudo header; that part is handled by the stack.
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb_checksum_none_assert(skb);
		}

		/* send it up */
		if ((bd_flags & BD_FLG_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		writel(idx, &ap->regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;
error:
	idx = rxretprd;
	goto out;
}

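/*
 * Reclaim transmit descriptors between the current consumer index
 * and the completion index reported by the NIC: unmap the buffers,
 * update the statistics and free the skbs.
 */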
static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = netdev_priv(dev);

	do {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			dma_unmap_page(&ap->pdev->dev,
				       dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       DMA_TO_DEVICE);
			dma_unmap_len_set(info, maplen, 0);
		}

		if (skb) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
			dev_consume_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;
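
	/*
	 * Note that tx_ret_csm is only advanced after the cleanup
	 * above: the wmb() makes the unmapping and skb frees visible
	 * before ace_start_xmit(), which polls tx_ret_csm when the
	 * ring looks full, can observe the new index.
	 */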
}

static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

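	/*
	 * The IRQ is shared (IRQF_SHARED), so return early if this
	 * NIC is not actually asserting an interrupt.
	 */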
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return IRQ_NONE;

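	/*
	 * Writing 0 to mailbox 0 acknowledges the interrupt; the
	 * read-back flushes the posted PCI write.
	 */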
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);

	/*
	 * RX processing needs no locking here: the RX return ring is
	 * only consumed from this interrupt handler.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		/*
		 * Skip reclaiming while the ring is reported
		 * completely full - the tx_csm snapshot may be
		 * inconsistent in that state.
		 */
		if (!tx_ring_full(ap, txcsm, ap->tx_prd))
			ace_tx_int(dev, txcsm, idx);
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * Refill the RX rings last; defer to the tasklet when a
	 * refill is already in progress or not yet urgent.
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(dev,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#ifdef DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(dev,
							      RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#ifdef DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(dev,
							       RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	return IRQ_HANDLED;
}

static int ace_open(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	if (ap->jumbo &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);

	if (dev->flags & IFF_PROMISC) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);

		ap->promisc = 1;
	} else
		ap->promisc = 0;
	ap->mcast_all = 0;

#if 0
	cmd.evt = C_LNK_NEGOTIATION;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
#endif

	netif_start_queue(dev);

	/*
	 * Setup the bottom half rx ring refill handler
	 */
	tasklet_setup(&ap->ace_tasklet, ace_tasklet);
	return 0;
}


static int ace_close(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;
	unsigned long flags;
	short i;

	/* Stop new transmits before tearing down the rings. */
	netif_stop_queue(dev);

	if (ap->promisc) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_DOWN;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	tasklet_kill(&ap->ace_tasklet);

	/*
	 * Make sure one CPU is not processing packets while buffers
	 * are being released by another.
	 */
	local_irq_save(flags);
	ace_mask_irq(dev);

	for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + i;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			if (ACE_IS_TIGON_I(ap)) {
				/* NIC-resident ring: clear via MMIO */
				struct tx_desc __iomem *tx;
				tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
				writel(0, &tx->addr.addrhi);
				writel(0, &tx->addr.addrlo);
				writel(0, &tx->flagsize);
			} else
				memset(ap->tx_ring + i, 0,
				       sizeof(struct tx_desc));
			dma_unmap_page(&ap->pdev->dev,
				       dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       DMA_TO_DEVICE);
			dma_unmap_len_set(info, maplen, 0);
		}
		if (skb) {
			dev_kfree_skb(skb);
			info->skb = NULL;
		}
	}

	if (ap->jumbo) {
		cmd.evt = C_RESET_JUMBO_RNG;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}

	ace_unmask_irq(dev);
	local_irq_restore(flags);

	return 0;
}

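/*
 * Map the linear data area of an skb for transmit DMA and record the
 * mapping in the TX ring info.  'tail' is the skb pointer stored in
 * the ring slot - it is NULL for all but the descriptor that
 * completes a frame, so the completion path frees each skb exactly
 * once.
 */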
2358 static inline dma_addr_t
2359 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2360 struct sk_buff *tail, u32 idx)
2361 {
2362 dma_addr_t mapping;
2363 struct tx_ring_info *info;
2364
2365 mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
2366 offset_in_page(skb->data), skb->len,
2367 DMA_TO_DEVICE);
2368
2369 info = ap->skb->tx_skbuff + idx;
2370 info->skb = tail;
2371 dma_unmap_addr_set(info, mapping, mapping);
2372 dma_unmap_len_set(info, maplen, skb->len);
2373 return mapping;
2374 }
2375
2376
2377 static inline void
2378 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2379 u32 flagsize, u32 vlan_tag)
2380 {
2381 #if !USE_TX_COAL_NOW
2382 flagsize &= ~BD_FLG_COAL_NOW;
2383 #endif
2384
2385 if (ACE_IS_TIGON_I(ap)) {
2386 struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
2387 writel(addr >> 32, &io->addr.addrhi);
2388 writel(addr & 0xffffffff, &io->addr.addrlo);
2389 writel(flagsize, &io->flagsize);
2390 writel(vlan_tag, &io->vlanres);
2391 } else {
2392 desc->addr.addrhi = addr >> 32;
2393 desc->addr.addrlo = addr;
2394 desc->flagsize = flagsize;
2395 desc->vlanres = vlan_tag;
2396 }
2397 }
2398
2399
2400 static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
2401 struct net_device *dev)
2402 {
2403 struct ace_private *ap = netdev_priv(dev);
2404 struct ace_regs __iomem *regs = ap->regs;
2405 struct tx_desc *desc;
2406 u32 idx, flagsize;
2407 unsigned long maxjiff = jiffies + 3*HZ;
2408
2409 restart:
2410 idx = ap->tx_prd;
2411
2412 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2413 goto overflow;
2414
2415 if (!skb_shinfo(skb)->nr_frags) {
2416 dma_addr_t mapping;
2417 u32 vlan_tag = 0;
2418
2419 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2420 flagsize = (skb->len << 16) | (BD_FLG_END);
2421 if (skb->ip_summed == CHECKSUM_PARTIAL)
2422 flagsize |= BD_FLG_TCP_UDP_SUM;
2423 if (skb_vlan_tag_present(skb)) {
2424 flagsize |= BD_FLG_VLAN_TAG;
2425 vlan_tag = skb_vlan_tag_get(skb);
2426 }
2427 desc = ap->tx_ring + idx;
2428 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2429
2430
2431 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2432 flagsize |= BD_FLG_COAL_NOW;
2433
2434 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
	} else {
		dma_addr_t mapping;
		u32 vlan_tag = 0;
		int i, len = 0;

		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
		flagsize = (skb_headlen(skb) << 16);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flagsize |= BD_FLG_TCP_UDP_SUM;
		if (skb_vlan_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = skb_vlan_tag_get(skb);
		}

		ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct tx_ring_info *info;

			len += skb_frag_size(frag);
			info = ap->skb->tx_skbuff + idx;
			desc = ap->tx_ring + idx;

			mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			flagsize = skb_frag_size(frag) << 16;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				flagsize |= BD_FLG_TCP_UDP_SUM;
			idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

			if (i == skb_shinfo(skb)->nr_frags - 1) {
				flagsize |= BD_FLG_END;
				if (tx_ring_full(ap, ap->tx_ret_csm, idx))
					flagsize |= BD_FLG_COAL_NOW;

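				/*
				 * Only the entry for the last fragment
				 * carries the skb pointer, so the skb is
				 * freed exactly once, after the whole
				 * packet has been transmitted.
				 */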
				info->skb = skb;
			} else {
				info->skb = NULL;
			}
			dma_unmap_addr_set(info, mapping, mapping);
			dma_unmap_len_set(info, maplen, skb_frag_size(frag));
			ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
		}
	}

	wmb();
	ap->tx_prd = idx;
	ace_set_txprd(regs, ap, idx);

	if (flagsize & BD_FLG_COAL_NOW) {
		netif_stop_queue(dev);

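		/*
		 * A TX completion interrupt may have freed ring entries
		 * between the ring-full test above and netif_stop_queue(),
		 * so re-test and wake the queue if it is free again.
		 */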
		if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

overflow:
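	/*
	 * This race is unavoidable in a lock-free driver: the queue can
	 * be woken just before tx_prd advances, letting us re-enter the
	 * transmit path while the ring still looks full. Spin for up to
	 * three seconds (maxjiff) waiting for completions to drain the
	 * ring before declaring it stuck.
	 */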
	if (time_before(jiffies, maxjiff)) {
		barrier();
		cpu_relax();
		goto restart;
	}

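	/* The ring stayed full for ~3 seconds; report the queue as busy. */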
	printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
	return NETDEV_TX_BUSY;
}


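/*
 * Program the new MTU into the NIC and switch jumbo-frame support on
 * or off as the MTU crosses ACE_STD_MTU.
 */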
static int ace_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
	dev->mtu = new_mtu;

	if (new_mtu > ACE_STD_MTU) {
		if (!(ap->jumbo)) {
			printk(KERN_INFO "%s: Enabling Jumbo frame "
			       "support\n", dev->name);
			ap->jumbo = 1;
			if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
				ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
			ace_set_rxtx_parms(dev, 1);
		}
	} else {
		while (test_and_set_bit(0, &ap->jumbo_refill_busy));
		ace_sync_irq(dev->irq);
		ace_set_rxtx_parms(dev, 0);
		if (ap->jumbo) {
			struct cmd cmd;

			cmd.evt = C_RESET_JUMBO_RNG;
			cmd.code = 0;
			cmd.idx = 0;
			ace_issue_cmd(regs, &cmd);
		}
	}

	return 0;
}

static int ace_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 link;
	u32 supported;

	memset(cmd, 0, sizeof(struct ethtool_link_ksettings));

	supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		     SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
		     SUPPORTED_Autoneg | SUPPORTED_FIBRE);

	cmd->base.port = PORT_FIBRE;

	link = readl(&regs->GigLnkState);
	if (link & LNK_1000MB) {
		cmd->base.speed = SPEED_1000;
	} else {
		link = readl(&regs->FastLnkState);
		if (link & LNK_100MB)
			cmd->base.speed = SPEED_100;
		else if (link & LNK_10MB)
			cmd->base.speed = SPEED_10;
		else
			cmd->base.speed = 0;
	}
	if (link & LNK_FULL_DUPLEX)
		cmd->base.duplex = DUPLEX_FULL;
	else
		cmd->base.duplex = DUPLEX_HALF;

	if (link & LNK_NEGOTIATE)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

#if 0
	/*
	 * Tuning parameters with no equivalent in the ethtool
	 * link_ksettings interface; kept for reference only.
	 */
	ecmd->trace = readl(&regs->TuneTrace);

	ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
	ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
#endif

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	return 0;
}

static int ace_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 link, speed;

	link = readl(&regs->GigLnkState);
	if (link & LNK_1000MB)
		speed = SPEED_1000;
	else {
		link = readl(&regs->FastLnkState);
		if (link & LNK_100MB)
			speed = SPEED_100;
		else if (link & LNK_10MB)
			speed = SPEED_10;
		else
			speed = SPEED_100;
	}

	link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
		LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
	if (!ACE_IS_TIGON_I(ap))
		link |= LNK_TX_FLOW_CTL_Y;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		link |= LNK_NEGOTIATE;
	if (cmd->base.speed != speed) {
		link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
		switch (cmd->base.speed) {
		case SPEED_1000:
			link |= LNK_1000MB;
			break;
		case SPEED_100:
			link |= LNK_100MB;
			break;
		case SPEED_10:
			link |= LNK_10MB;
			break;
		}
	}

	if (cmd->base.duplex == DUPLEX_FULL)
		link |= LNK_FULL_DUPLEX;

	if (link != ap->link) {
		struct cmd cmd;
		printk(KERN_INFO "%s: Renegotiating link state\n",
		       dev->name);

		ap->link = link;
		writel(link, &regs->TuneLink);
		if (!ACE_IS_TIGON_I(ap))
			writel(link, &regs->TuneFastLink);
		wmb();

		cmd.evt = C_LNK_NEGOTIATION;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
	return 0;
}

static void ace_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ace_private *ap = netdev_priv(dev);

	strlcpy(info->driver, "acenic", sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%i.%i.%i",
		 ap->firmware_major, ap->firmware_minor, ap->firmware_fix);

	if (ap->pdev)
		strlcpy(info->bus_info, pci_name(ap->pdev),
			sizeof(info->bus_info));
}


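/*
 * Set the hardware MAC address: write it to the MacAddrHi/MacAddrLo
 * registers and tell the firmware to pick it up. Refused while the
 * interface is running.
 */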
static int ace_set_mac_addr(struct net_device *dev, void *p)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct sockaddr *addr = p;
	const u8 *da;
	struct cmd cmd;

	if (netif_running(dev))
		return -EBUSY;

	eth_hw_addr_set(dev, addr->sa_data);

	da = (const u8 *)dev->dev_addr;

	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
	       &regs->MacAddrLo);

	cmd.evt = C_SET_MAC_ADDR;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	return 0;
}


static void ace_set_multicast_list(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 1;
	} else if (ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->mcast_all = 0;
	}

	if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 1;
	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

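	/*
	 * The driver does not program individual multicast addresses;
	 * any non-empty multicast list simply enables reception of all
	 * multicast frames, leaving the filtering to the upper layers.
	 */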
	if (!netdev_mc_empty(dev) && !ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	} else if (!ap->mcast_all) {
		cmd.evt = C_SET_MULTICAST_MODE;
		cmd.code = C_C_MCAST_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
}


static struct net_device_stats *ace_get_stats(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_mac_stats __iomem *mac_stats =
		(struct ace_mac_stats __iomem *)ap->regs->Stats;

	dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
	dev->stats.multicast = readl(&mac_stats->kept_mc);
	dev->stats.collisions = readl(&mac_stats->coll);

	return &dev->stats;
}


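/*
 * Copy a block of 32-bit words into NIC-local memory through the
 * shared-memory window, reprogramming WinBase whenever the destination
 * crosses an ACE_WINDOW_SIZE boundary. Used to download the firmware.
 */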
static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
		     u32 dest, int size)
{
	void __iomem *tdest;
	short tsize, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
		for (i = 0; i < (tsize / 4); i++) {
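			/* The firmware image is big-endian. */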
			writel(be32_to_cpup(src), tdest);
			src++;
			tdest += 4;
			dest += 4;
			size -= 4;
		}
	}
}


static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
	void __iomem *tdest;
	short tsize = 0, i;

	if (size <= 0)
		return;

	while (size > 0) {
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);

		for (i = 0; i < (tsize / 4); i++) {
			writel(0, tdest + i*4);
		}

		dest += tsize;
		size -= tsize;
	}
}


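/*
 * Download the firmware into the SRAM on the NIC. The image is fetched
 * with request_firmware() ("acenic/tg1.bin" for Tigon I, "acenic/tg2.bin"
 * for Tigon II) and can only be loaded while the NIC's CPU is halted.
 */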
static int ace_load_firmware(struct net_device *dev)
{
	const struct firmware *fw;
	const char *fw_name = "acenic/tg2.bin";
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	const __be32 *fw_data;
	u32 load_addr;
	int ret;

	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
		printk(KERN_ERR "%s: trying to download firmware while the "
		       "CPU is running!\n", ap->name);
		return -EFAULT;
	}

	if (ACE_IS_TIGON_I(ap))
		fw_name = "acenic/tg1.bin";

	ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
	if (ret) {
		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       ap->name, fw_name);
		return ret;
	}

	fw_data = (void *)fw->data;

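	/*
	 * The blob starts with three version bytes, followed by the
	 * big-endian start and load addresses in the second and third
	 * 32-bit words; the code to load begins at byte offset 12.
	 */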
	ap->firmware_major = fw->data[0];
	ap->firmware_minor = fw->data[1];
	ap->firmware_fix = fw->data[2];

	ap->firmware_start = be32_to_cpu(fw_data[1]);
	if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
		       ap->name, ap->firmware_start, fw_name);
		ret = -EINVAL;
		goto out;
	}

	load_addr = be32_to_cpu(fw_data[2]);
	if (load_addr < 0x4000 || load_addr >= 0x80000) {
		printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
		       ap->name, load_addr, fw_name);
		ret = -EINVAL;
		goto out;
	}

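	/*
	 * Clear NIC memory from 0x2000 up to 0x80000 before copying the
	 * image in at load_addr.
	 */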
	ace_clear(regs, 0x2000, 0x80000-0x2000);
	ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
 out:
	release_firmware(fw);
	return ret;
}


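/*
 * The following helpers bit-bang the NIC's serial (i2c) EEPROM through
 * the GPIO bits of the LocalCtrl register: eeprom_start()/eeprom_stop()
 * generate the i2c start/stop conditions, eeprom_prep() clocks out a
 * byte, and eeprom_check_ack() samples the acknowledge bit.
 */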
static void eeprom_start(struct ace_regs __iomem *regs)
{
	u32 local;

	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
}


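/*
 * Clock one byte out to the EEPROM, most-significant bit first.
 */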
static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
	short i;
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_DATA_OUT;
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	for (i = 0; i < 8; i++, magic <<= 1) {
		udelay(ACE_SHORT_DELAY);
		if (magic & 0x80)
			local |= EEPROM_DATA_OUT;
		else
			local &= ~EEPROM_DATA_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();

		udelay(ACE_SHORT_DELAY);
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
	}
}


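/*
 * Release the data line and issue one clock pulse to sample the
 * acknowledge bit. Returns nonzero if the EEPROM failed to ACK.
 */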
static int eeprom_check_ack(struct ace_regs __iomem *regs)
{
	int state;
	u32 local;

	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);

	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	return state;
}


static void eeprom_stop(struct ace_regs __iomem *regs)
{
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	mb();
}


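/*
 * Read one byte from the EEPROM at @offset: select the device for a
 * write, send the two address bytes, then re-start in read mode and
 * clock in the eight data bits.
 */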
static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	unsigned long flags;
	u32 local;
	int result = 0;
	short i;

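	/*
	 * Keep interrupts off on this CPU while bit-banging the i2c
	 * device; the timing is done with udelay() and must not be
	 * stretched by interrupt handlers.
	 */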
	local_irq_save(flags);

	eeprom_start(regs);

	eeprom_prep(regs, EEPROM_WRITE_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, (offset >> 8) & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 0\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, offset & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 1\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_start(regs);
	eeprom_prep(regs, EEPROM_READ_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	for (i = 0; i < 8; i++) {
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_WRITE_ENABLE;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_LONG_DELAY);
		mb();
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);

		result = (result << 1) |
			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
		udelay(ACE_SHORT_DELAY);
		mb();
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_SHORT_DELAY);
		mb();
		if (i == 7) {
			local |= EEPROM_WRITE_ENABLE;
			writel(local, &regs->LocalCtrl);
			readl(&regs->LocalCtrl);
			mb();
			udelay(ACE_SHORT_DELAY);
		}
	}

	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	eeprom_stop(regs);

	local_irq_restore(flags);
 out:
	return result;

 eeprom_read_error:
	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
	       ap->name, offset);
	goto out;
}

module_pci_driver(acenic_pci_driver);