0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
0004  *           and other Tigon based cards.
0005  *
0006  * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
0007  *
0008  * Thanks to Alteon and 3Com for providing hardware and documentation
0009  * enabling me to write this driver.
0010  *
0011  * A mailing list for discussing the use of this driver has been
0012  * set up; please subscribe to the list if you have any questions
0013  * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
0014  * see how to subscribe.
0015  *
0016  * Additional credits:
0017  *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
0018  *       dump support. The trace dump support has not been
0019  *       integrated yet however.
0020  *   Troy Benjegerdes: Big Endian (PPC) patches.
0021  *   Nate Stahl: Better out of memory handling and stats support.
0022  *   Aman Singla: Nasty race between interrupt handler and tx code dealing
0023  *                with 'testing the tx_ret_csm and setting tx_full'
0024  *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
0025  *                                       infrastructure and Sparc support
0026  *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
0027  *                              driver under Linux/Sparc64
0028  *   Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
0029  *                                       ETHTOOL_GDRVINFO support
0030  *   Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
0031  *                                       handler and close() cleanup.
0032  *   Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
0033  *                                       memory mapped IO is enabled to
0034  *                                       make the driver work on RS/6000.
0035  *   Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
0036  *                                       where the driver would disable
0037  *                                       bus master mode if it had to disable
0038  *                                       write and invalidate.
0039  *   Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
0040  *                                       endian systems.
0041  *   Val Henson <vhenson@esscom.com>:    Reset Jumbo skb producer and
0042  *                                       rx producer index when
0043  *                                       flushing the Jumbo ring.
0044  *   Hans Grobler <grobh@sun.ac.za>:     Memory leak fixes in the
0045  *                                       driver init path.
0046  *   Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
0047  */
0048 
0049 #include <linux/module.h>
0050 #include <linux/moduleparam.h>
0051 #include <linux/types.h>
0052 #include <linux/errno.h>
0053 #include <linux/ioport.h>
0054 #include <linux/pci.h>
0055 #include <linux/dma-mapping.h>
0056 #include <linux/kernel.h>
0057 #include <linux/netdevice.h>
0058 #include <linux/etherdevice.h>
0059 #include <linux/skbuff.h>
0060 #include <linux/delay.h>
0061 #include <linux/mm.h>
0062 #include <linux/highmem.h>
0063 #include <linux/sockios.h>
0064 #include <linux/firmware.h>
0065 #include <linux/slab.h>
0066 #include <linux/prefetch.h>
0067 #include <linux/if_vlan.h>
0068 
0069 #ifdef SIOCETHTOOL
0070 #include <linux/ethtool.h>
0071 #endif
0072 
0073 #include <net/sock.h>
0074 #include <net/ip.h>
0075 
0076 #include <asm/io.h>
0077 #include <asm/irq.h>
0078 #include <asm/byteorder.h>
0079 #include <linux/uaccess.h>
0080 
0081 
0082 #define DRV_NAME "acenic"
0083 
0084 #undef INDEX_DEBUG
0085 
0086 #ifdef CONFIG_ACENIC_OMIT_TIGON_I
0087 #define ACE_IS_TIGON_I(ap)  0
0088 #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
0089 #else
0090 #define ACE_IS_TIGON_I(ap)  (ap->version == 1)
0091 #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
0092 #endif
0093 
0094 #ifndef PCI_VENDOR_ID_ALTEON
0095 #define PCI_VENDOR_ID_ALTEON        0x12ae
0096 #endif
0097 #ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
0098 #define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
0099 #define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
0100 #endif
0101 #ifndef PCI_DEVICE_ID_3COM_3C985
0102 #define PCI_DEVICE_ID_3COM_3C985    0x0001
0103 #endif
0104 #ifndef PCI_VENDOR_ID_NETGEAR
0105 #define PCI_VENDOR_ID_NETGEAR       0x1385
0106 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
0107 #endif
0108 #ifndef PCI_DEVICE_ID_NETGEAR_GA620T
0109 #define PCI_DEVICE_ID_NETGEAR_GA620T    0x630a
0110 #endif
0111 
0112 
0113 /*
0114  * Farallon used the DEC vendor ID by mistake and they seem not
0115  * to care - stinky!
0116  */
0117 #ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
0118 #define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
0119 #endif
0120 #ifndef PCI_DEVICE_ID_FARALLON_PN9100T
0121 #define PCI_DEVICE_ID_FARALLON_PN9100T  0xfa
0122 #endif
0123 #ifndef PCI_VENDOR_ID_SGI
0124 #define PCI_VENDOR_ID_SGI       0x10a9
0125 #endif
0126 #ifndef PCI_DEVICE_ID_SGI_ACENIC
0127 #define PCI_DEVICE_ID_SGI_ACENIC    0x0009
0128 #endif
0129 
0130 static const struct pci_device_id acenic_pci_tbl[] = {
0131     { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
0132       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0133     { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
0134       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0135     { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
0136       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0137     { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
0138       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0139     { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
0140       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0141     /*
0142      * Farallon used the DEC vendor ID on their cards incorrectly,
0143      * then later Alteon's ID.
0144      */
0145     { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
0146       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0147     { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
0148       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0149     { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
0150       PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
0151     { }
0152 };
0153 MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
0154 
0155 #define ace_sync_irq(irq)   synchronize_irq(irq)
0156 
0157 #ifndef offset_in_page
0158 #define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
0159 #endif
0160 
0161 #define ACE_MAX_MOD_PARMS   8
0162 #define BOARD_IDX_STATIC    0
0163 #define BOARD_IDX_OVERFLOW  -1
0164 
0165 #include "acenic.h"
0166 
0167 /*
0168  * These must be defined before the firmware is included.
0169  */
0170 #define MAX_TEXT_LEN    (96 * 1024)
0171 #define MAX_RODATA_LEN  (8 * 1024)
0172 #define MAX_DATA_LEN    (2 * 1024)
0173 
0174 #ifndef tigon2FwReleaseLocal
0175 #define tigon2FwReleaseLocal 0
0176 #endif
0177 
0178 /*
0179  * This driver currently supports Tigon I and Tigon II based cards
0180  * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
0181  * GA620. The driver should also work on the SGI, DEC and Farallon
0182  * versions of the card, however I have not been able to test that
0183  * myself.
0184  *
0185  * This card is really neat, it supports receive hardware checksumming
0186  * and jumbo frames (up to 9000 bytes) and does a lot of work in the
0187  * firmware. Also the programming interface is quite neat, except for
0188  * the parts dealing with the i2c eeprom on the card ;-)
0189  *
0190  * Using jumbo frames:
0191  *
0192  * To enable jumbo frames, simply specify an mtu between 1500 and 9000
0193  * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
0194  * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
0195  * interface number and <MTU> being the MTU value.
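 *
 * For example (the interface name and values are purely illustrative):
 *
 *    ifconfig eth0 mtu 9000     (enable jumbo frames)
 *    ifconfig eth0 mtu 1500     (back to standard frames)
 *
 * or, with iproute2, `ip link set dev eth0 mtu 9000'.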
0196  *
0197  * Module parameters:
0198  *
0199  * When compiled as a loadable module, the driver allows for a number
0200  * of module parameters to be specified. The driver supports the
0201  * following module parameters:
0202  *
0203  *  trace=<val> - Firmware trace level. This requires special traced
0204  *                firmware to replace the firmware supplied with
0205  *                the driver - for debugging purposes only.
0206  *
0207  *  link=<val>  - Link state. Normally you want to use the default link
0208  *                parameters set by the driver. This can be used to
0209  *                override these in case your switch doesn't negotiate
0210  *                the link properly. Valid values are:
0211  *         0x0001 - Force half duplex link.
0212  *         0x0002 - Do not negotiate line speed with the other end.
0213  *         0x0010 - 10Mbit/sec link.
0214  *         0x0020 - 100Mbit/sec link.
0215  *         0x0040 - 1000Mbit/sec link.
0216  *         0x0100 - Do not negotiate flow control.
0217  *         0x0200 - Enable RX flow control Y
0218  *         0x0400 - Enable TX flow control Y (Tigon II NICs only).
0219  *                Default value is 0x0270, i.e. enable link and flow
0220  *                control negotiation, negotiating the highest
0221  *                possible link speed with RX flow control enabled.
0222  *
0223  *                When disabling link speed negotiation, only one link
0224  *                speed is allowed to be specified!
0225  *
0226  *  tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
0227  *                to wait for more packets to arrive in the transmit ring,
0228  *                before interrupting the host, after transmitting the
0229  *                first packet in the ring.
0230  *
0231  *  rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
0232  *                to wait for more packets to arrive before
0233  *                interrupting the host, from the time the first
0234  *                packet arrives.
0235  *
0236  *  max_tx_desc=<val> - maximum number of transmit descriptors
0237  *                (packets) transmitted before interrupting the host.
0238  *
0239  *  max_rx_desc=<val> - maximum number of receive descriptors
0240  *                (packets) received before interrupting the host.
0241  *
0242  *  tx_ratio=<val> - 7 bit value (0 - 63) specifying the split in 64th
0243  *                increments of the NIC's on board memory to be used for
0244  *                transmit and receive buffers. For the 1MB NIC approx. 800KB
0245  *                is available, on the 1/2MB NIC approx. 300KB is available.
0246  *                68KB will always be available as a minimum for both
0247  *                directions. The default value is a 50/50 split.
0248  *  dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
0249  *                operations, default (1) is to always disable this as
0250  *                that is what Alteon does on NT. I have not been able
0251  *                to measure any real performance differences with
0252  *                this on my systems. Set <val>=0 if you want to
0253  *                enable these operations.
0254  *
0255  * If you use more than one NIC, separate the parameters for the
0256  * individual NICs with commas, e.g. trace=0,0x00001fff,0 if you want to
0257  * run tracing on NIC #2 but not on NIC #1 and #3 (see the example below).
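 *
 * An illustrative invocation (values chosen purely as an example, not
 * taken from the original text) for a machine with two NICs:
 *
 *    modprobe acenic link=0x0022,0x0270 max_tx_desc=60,60
 *
 * This forces NIC #1 to 100Mbit/sec with speed negotiation disabled
 * (0x0002 | 0x0020) and leaves NIC #2 at the default link settings.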
0258  *
0259  * TODO:
0260  *
0261  * - Proper multicast support.
0262  * - NIC dump support.
0263  * - More tuning parameters.
0264  *
0265  * The mini ring is not used under Linux and I am not sure it makes sense
0266  * to actually use it.
0267  *
0268  * New interrupt handler strategy:
0269  *
0270  * The old interrupt handler worked using the traditional method of
0271  * replacing an skbuff with a new one when a packet arrives. However
0272  * the rx rings do not need to contain a static number of buffer
0273  * descriptors, thus it makes sense to move the memory allocation out
0274  * of the main interrupt handler and do it in a bottom half handler
0275  * and only allocate new buffers when the number of buffers in the
0276  * ring is below a certain threshold. In order to avoid starving the
0277  * NIC under heavy load it is however necessary to force allocation
0278  * when hitting a minimum threshold. The strategy for allocation is as
0279  * follows:
0280  *
0281  *     RX_LOW_BUF_THRES    - allocate buffers in the bottom half
0282  *     RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
0283  *                           the buffers in the interrupt handler
0284  *     RX_RING_THRES       - maximum number of buffers in the rx ring
0285  *     RX_MINI_THRES       - maximum number of buffers in the mini ring
0286  *     RX_JUMBO_THRES      - maximum number of buffers in the jumbo ring
0287  *
0288  * One advantageous side effect of this allocation approach is that the
0289  * entire rx processing can be done without holding any spin lock
0290  * since the rx rings and registers are totally independent of the tx
0291  * ring and its registers.  This of course includes the kmalloc's of
0292  * new skb's. Thus start_xmit can run in parallel with rx processing
0293  * and the memory allocation on SMP systems.
0294  *
0295  * Note that running the skb reallocation in a bottom half opens up
0296  * another can of races which needs to be handled properly. In
0297  * particular it can happen that the interrupt handler tries to run
0298  * the reallocation while the bottom half is either running on another
0299  * CPU or was interrupted on the same CPU. To get around this the
0300  * driver uses bitops to prevent the reallocation routines from being
0301  * reentered.
0302  *
0303  * TX handling can also be done without holding any spin lock, wheee
0304  * this is fun! since tx_ret_csm is only written to by the interrupt
0305  * handler. The case to be aware of is when shutting down the device
0306  * and cleaning up where it is necessary to make sure that
0307  * start_xmit() is not running while this is happening. Well DaveM
0308  * informs me that this case is already protected against ... bye bye
0309  * Mr. Spin Lock, it was nice to know you.
0310  *
0311  * TX interrupts are now partly disabled so the NIC will only generate
0312  * TX interrupts for the number of coal ticks, not for the number of
0313  * TX packets in the queue. This should reduce the number of TX only,
0314  * ie. when no RX processing is done, interrupts seen.
0315  */
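
/*
 * Illustrative sketch (not part of the original driver) of the reentry
 * guard described above.  The driver uses this test_and_set_bit pattern
 * on ap->std_refill_busy (and the mini/jumbo equivalents) before calling
 * a refill routine, and the bit is cleared again once the refill is done;
 * nr_bufs here stands for whatever refill count applies (ace_init() below
 * uses RX_RING_SIZE):
 *
 *	if (!test_and_set_bit(0, &ap->std_refill_busy))
 *		ace_load_std_rx_ring(dev, nr_bufs);
 *	...
 *	clear_bit(0, &ap->std_refill_busy);	(when the refill finishes)
 */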
0316 
0317 /*
0318  * Threshold values for RX buffer allocation - the low water marks for
0319  * when to start refilling the rings are set to 75% of the ring
0320  * sizes. It seems to make sense to refill the rings entirely from the
0321  * interrupt handler once it gets below the panic threshold, that way
0322  * we don't risk that the refilling is moved to another CPU when the
0323  * one running the interrupt handler just got the slab code hot in its
0324  * cache.
0325  */
0326 #define RX_RING_SIZE        72
0327 #define RX_MINI_SIZE        64
0328 #define RX_JUMBO_SIZE       48
0329 
0330 #define RX_PANIC_STD_THRES  16
0331 #define RX_PANIC_STD_REFILL ((3*RX_PANIC_STD_THRES)/2)
0332 #define RX_LOW_STD_THRES    ((3*RX_RING_SIZE)/4)
0333 #define RX_PANIC_MINI_THRES 12
0334 #define RX_PANIC_MINI_REFILL    ((3*RX_PANIC_MINI_THRES)/2)
0335 #define RX_LOW_MINI_THRES   ((3*RX_MINI_SIZE)/4)
0336 #define RX_PANIC_JUMBO_THRES    6
0337 #define RX_PANIC_JUMBO_REFILL   ((3*RX_PANIC_JUMBO_THRES)/2)
0338 #define RX_LOW_JUMBO_THRES  ((3*RX_JUMBO_SIZE)/4)
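
/*
 * With the ring sizes above, the low-water marks work out to 54, 48 and
 * 36 buffers for the standard, mini and jumbo rings respectively (75% of
 * 72, 64 and 48), and the panic refill counts to 24, 18 and 9.
 */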
0339 
0340 
0341 /*
0342  * Size of the mini ring entries, basically these just should be big
0343  * enough to take TCP ACKs
0344  */
0345 #define ACE_MINI_SIZE       100
0346 
0347 #define ACE_MINI_BUFSIZE    ACE_MINI_SIZE
0348 #define ACE_STD_BUFSIZE     (ACE_STD_MTU + ETH_HLEN + 4)
0349 #define ACE_JUMBO_BUFSIZE   (ACE_JUMBO_MTU + ETH_HLEN + 4)
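
/*
 * For reference, assuming the conventional values of ACE_STD_MTU (1500)
 * and ACE_JUMBO_MTU (9000) from acenic.h (not shown in this file), the
 * standard buffers come to 1500 + 14 + 4 = 1518 bytes and the jumbo
 * buffers to 9018 bytes; the extra 4 bytes are presumably headroom for a
 * VLAN tag.
 */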
0350 
0351 /*
0352  * There seems to be a magic difference in the effect between 995 and 996
0353  * but little difference between 900 and 995 ... no idea why.
0354  *
0355  * There is now a default set of tuning parameters which is set, depending
0356  * on whether or not the user enables Jumbo frames. It's assumed that if
0357  * Jumbo frames are enabled, the user wants optimal tuning for that case.
0358  */
0359 #define DEF_TX_COAL     400 /* 996 */
0360 #define DEF_TX_MAX_DESC     60  /* was 40 */
0361 #define DEF_RX_COAL     120 /* 1000 */
0362 #define DEF_RX_MAX_DESC     25
0363 #define DEF_TX_RATIO        21 /* 24 */
0364 
0365 #define DEF_JUMBO_TX_COAL   20
0366 #define DEF_JUMBO_TX_MAX_DESC   60
0367 #define DEF_JUMBO_RX_COAL   30
0368 #define DEF_JUMBO_RX_MAX_DESC   6
0369 #define DEF_JUMBO_TX_RATIO  21
0370 
0371 #if tigon2FwReleaseLocal < 20001118
0372 /*
0373  * Standard firmware and early modifications duplicate
0374  * IRQ load without this flag (coal timer is never reset).
0375  * Note that with this flag tx_coal should be less than
0376  * time to xmit full tx ring.
0377  * 400usec is not so bad for tx ring size of 128.
0378  */
0379 #define TX_COAL_INTS_ONLY   1   /* worth it */
0380 #else
0381 /*
0382  * With modified firmware, this is not necessary, but still useful.
0383  */
0384 #define TX_COAL_INTS_ONLY   1
0385 #endif
0386 
0387 #define DEF_TRACE       0
0388 #define DEF_STAT        (2 * TICKS_PER_SEC)
0389 
0390 
0391 static int link_state[ACE_MAX_MOD_PARMS];
0392 static int trace[ACE_MAX_MOD_PARMS];
0393 static int tx_coal_tick[ACE_MAX_MOD_PARMS];
0394 static int rx_coal_tick[ACE_MAX_MOD_PARMS];
0395 static int max_tx_desc[ACE_MAX_MOD_PARMS];
0396 static int max_rx_desc[ACE_MAX_MOD_PARMS];
0397 static int tx_ratio[ACE_MAX_MOD_PARMS];
0398 static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
0399 
0400 MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
0401 MODULE_LICENSE("GPL");
0402 MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
0403 #ifndef CONFIG_ACENIC_OMIT_TIGON_I
0404 MODULE_FIRMWARE("acenic/tg1.bin");
0405 #endif
0406 MODULE_FIRMWARE("acenic/tg2.bin");
0407 
0408 module_param_array_named(link, link_state, int, NULL, 0);
0409 module_param_array(trace, int, NULL, 0);
0410 module_param_array(tx_coal_tick, int, NULL, 0);
0411 module_param_array(max_tx_desc, int, NULL, 0);
0412 module_param_array(rx_coal_tick, int, NULL, 0);
0413 module_param_array(max_rx_desc, int, NULL, 0);
0414 module_param_array(tx_ratio, int, NULL, 0);
0415 MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
0416 MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
0417 MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
0418 MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
0419 MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
0420 MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
0421 MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
0422 
0423 
0424 static const char version[] =
0425   "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
0426   "                            http://home.cern.ch/~jes/gige/acenic.html\n";
0427 
0428 static int ace_get_link_ksettings(struct net_device *,
0429                   struct ethtool_link_ksettings *);
0430 static int ace_set_link_ksettings(struct net_device *,
0431                   const struct ethtool_link_ksettings *);
0432 static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
0433 
0434 static const struct ethtool_ops ace_ethtool_ops = {
0435     .get_drvinfo = ace_get_drvinfo,
0436     .get_link_ksettings = ace_get_link_ksettings,
0437     .set_link_ksettings = ace_set_link_ksettings,
0438 };
0439 
0440 static void ace_watchdog(struct net_device *dev, unsigned int txqueue);
0441 
0442 static const struct net_device_ops ace_netdev_ops = {
0443     .ndo_open       = ace_open,
0444     .ndo_stop       = ace_close,
0445     .ndo_tx_timeout     = ace_watchdog,
0446     .ndo_get_stats      = ace_get_stats,
0447     .ndo_start_xmit     = ace_start_xmit,
0448     .ndo_set_rx_mode    = ace_set_multicast_list,
0449     .ndo_validate_addr  = eth_validate_addr,
0450     .ndo_set_mac_address    = ace_set_mac_addr,
0451     .ndo_change_mtu     = ace_change_mtu,
0452 };
0453 
0454 static int acenic_probe_one(struct pci_dev *pdev,
0455                 const struct pci_device_id *id)
0456 {
0457     struct net_device *dev;
0458     struct ace_private *ap;
0459     static int boards_found;
0460 
0461     dev = alloc_etherdev(sizeof(struct ace_private));
0462     if (dev == NULL)
0463         return -ENOMEM;
0464 
0465     SET_NETDEV_DEV(dev, &pdev->dev);
0466 
0467     ap = netdev_priv(dev);
0468     ap->ndev = dev;
0469     ap->pdev = pdev;
0470     ap->name = pci_name(pdev);
0471 
0472     dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
0473     dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
0474 
0475     dev->watchdog_timeo = 5*HZ;
0476     dev->min_mtu = 0;
0477     dev->max_mtu = ACE_JUMBO_MTU;
0478 
0479     dev->netdev_ops = &ace_netdev_ops;
0480     dev->ethtool_ops = &ace_ethtool_ops;
0481 
0482     /* we only display this string ONCE */
0483     if (!boards_found)
0484         printk(version);
0485 
0486     if (pci_enable_device(pdev))
0487         goto fail_free_netdev;
0488 
0489     /*
0490      * Enable master mode before we start playing with the
0491      * pci_command word since pci_set_master() will modify
0492      * it.
0493      */
0494     pci_set_master(pdev);
0495 
0496     pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
0497 
0498     /* OpenFirmware on Macs does not set this - DOH.. */
0499     if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
0500         printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
0501                "access - was not enabled by BIOS/Firmware\n",
0502                ap->name);
0503         ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
0504         pci_write_config_word(ap->pdev, PCI_COMMAND,
0505                       ap->pci_command);
0506         wmb();
0507     }
0508 
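    /*
     * Bump the PCI latency timer if the BIOS/firmware left it low;
     * 0x40 PCI clocks is presumably enough to let the NIC hold the
     * bus for reasonably long bursts.
     */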
0509     pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
0510     if (ap->pci_latency <= 0x40) {
0511         ap->pci_latency = 0x40;
0512         pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
0513     }
0514 
0515     /*
0516      * Remap the regs into kernel space - this is abuse of
0517      * dev->base_addr since it was meant for I/O port
0518      * addresses but who gives a damn.
0519      */
0520     dev->base_addr = pci_resource_start(pdev, 0);
0521     ap->regs = ioremap(dev->base_addr, 0x4000);
0522     if (!ap->regs) {
0523         printk(KERN_ERR "%s:  Unable to map I/O register, "
0524                "AceNIC %i will be disabled.\n",
0525                ap->name, boards_found);
0526         goto fail_free_netdev;
0527     }
0528 
0529     switch(pdev->vendor) {
0530     case PCI_VENDOR_ID_ALTEON:
0531         if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
0532             printk(KERN_INFO "%s: Farallon PN9100-T ",
0533                    ap->name);
0534         } else {
0535             printk(KERN_INFO "%s: Alteon AceNIC ",
0536                    ap->name);
0537         }
0538         break;
0539     case PCI_VENDOR_ID_3COM:
0540         printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
0541         break;
0542     case PCI_VENDOR_ID_NETGEAR:
0543         printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
0544         break;
0545     case PCI_VENDOR_ID_DEC:
0546         if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
0547             printk(KERN_INFO "%s: Farallon PN9000-SX ",
0548                    ap->name);
0549             break;
0550         }
0551         fallthrough;
0552     case PCI_VENDOR_ID_SGI:
0553         printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
0554         break;
0555     default:
0556         printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
0557         break;
0558     }
0559 
0560     printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
0561     printk("irq %d\n", pdev->irq);
0562 
0563 #ifdef CONFIG_ACENIC_OMIT_TIGON_I
0564     if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
0565         printk(KERN_ERR "%s: Driver compiled without Tigon I"
0566                " support - NIC disabled\n", dev->name);
0567         goto fail_uninit;
0568     }
0569 #endif
0570 
0571     if (ace_allocate_descriptors(dev))
0572         goto fail_free_netdev;
0573 
0574 #ifdef MODULE
0575     if (boards_found >= ACE_MAX_MOD_PARMS)
0576         ap->board_idx = BOARD_IDX_OVERFLOW;
0577     else
0578         ap->board_idx = boards_found;
0579 #else
0580     ap->board_idx = BOARD_IDX_STATIC;
0581 #endif
0582 
0583     if (ace_init(dev))
0584         goto fail_free_netdev;
0585 
0586     if (register_netdev(dev)) {
0587         printk(KERN_ERR "acenic: device registration failed\n");
0588         goto fail_uninit;
0589     }
0590     ap->name = dev->name;
0591 
0592     dev->features |= NETIF_F_HIGHDMA;
0593 
0594     pci_set_drvdata(pdev, dev);
0595 
0596     boards_found++;
0597     return 0;
0598 
0599  fail_uninit:
0600     ace_init_cleanup(dev);
0601  fail_free_netdev:
0602     free_netdev(dev);
0603     return -ENODEV;
0604 }
0605 
0606 static void acenic_remove_one(struct pci_dev *pdev)
0607 {
0608     struct net_device *dev = pci_get_drvdata(pdev);
0609     struct ace_private *ap = netdev_priv(dev);
0610     struct ace_regs __iomem *regs = ap->regs;
0611     short i;
0612 
0613     unregister_netdev(dev);
0614 
0615     writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
0616     if (ap->version >= 2)
0617         writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
0618 
0619     /*
0620      * This clears any pending interrupts
0621      */
0622     writel(1, &regs->Mb0Lo);
0623     readl(&regs->CpuCtrl);  /* flush */
0624 
0625     /*
0626      * Make sure no other CPUs are processing interrupts
0627      * on the card before the buffers are being released.
0628      * Otherwise one might experience some `interesting'
0629      * effects.
0630      *
0631      * Then release the RX buffers - jumbo buffers were
0632      * already released in ace_close().
0633      */
0634     ace_sync_irq(dev->irq);
0635 
0636     for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
0637         struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
0638 
0639         if (skb) {
0640             struct ring_info *ringp;
0641             dma_addr_t mapping;
0642 
0643             ringp = &ap->skb->rx_std_skbuff[i];
0644             mapping = dma_unmap_addr(ringp, mapping);
0645             dma_unmap_page(&ap->pdev->dev, mapping,
0646                        ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
0647 
0648             ap->rx_std_ring[i].size = 0;
0649             ap->skb->rx_std_skbuff[i].skb = NULL;
0650             dev_kfree_skb(skb);
0651         }
0652     }
0653 
0654     if (ap->version >= 2) {
0655         for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
0656             struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
0657 
0658             if (skb) {
0659                 struct ring_info *ringp;
0660                 dma_addr_t mapping;
0661 
0662                 ringp = &ap->skb->rx_mini_skbuff[i];
0663                 mapping = dma_unmap_addr(ringp,mapping);
0664                 dma_unmap_page(&ap->pdev->dev, mapping,
0665                            ACE_MINI_BUFSIZE,
0666                            DMA_FROM_DEVICE);
0667 
0668                 ap->rx_mini_ring[i].size = 0;
0669                 ap->skb->rx_mini_skbuff[i].skb = NULL;
0670                 dev_kfree_skb(skb);
0671             }
0672         }
0673     }
0674 
0675     for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
0676         struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
0677         if (skb) {
0678             struct ring_info *ringp;
0679             dma_addr_t mapping;
0680 
0681             ringp = &ap->skb->rx_jumbo_skbuff[i];
0682             mapping = dma_unmap_addr(ringp, mapping);
0683             dma_unmap_page(&ap->pdev->dev, mapping,
0684                        ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
0685 
0686             ap->rx_jumbo_ring[i].size = 0;
0687             ap->skb->rx_jumbo_skbuff[i].skb = NULL;
0688             dev_kfree_skb(skb);
0689         }
0690     }
0691 
0692     ace_init_cleanup(dev);
0693     free_netdev(dev);
0694 }
0695 
0696 static struct pci_driver acenic_pci_driver = {
0697     .name       = "acenic",
0698     .id_table   = acenic_pci_tbl,
0699     .probe      = acenic_probe_one,
0700     .remove     = acenic_remove_one,
0701 };
0702 
0703 static void ace_free_descriptors(struct net_device *dev)
0704 {
0705     struct ace_private *ap = netdev_priv(dev);
0706     int size;
0707 
0708     if (ap->rx_std_ring != NULL) {
0709         size = (sizeof(struct rx_desc) *
0710             (RX_STD_RING_ENTRIES +
0711              RX_JUMBO_RING_ENTRIES +
0712              RX_MINI_RING_ENTRIES +
0713              RX_RETURN_RING_ENTRIES));
0714         dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring,
0715                   ap->rx_ring_base_dma);
0716         ap->rx_std_ring = NULL;
0717         ap->rx_jumbo_ring = NULL;
0718         ap->rx_mini_ring = NULL;
0719         ap->rx_return_ring = NULL;
0720     }
0721     if (ap->evt_ring != NULL) {
0722         size = (sizeof(struct event) * EVT_RING_ENTRIES);
0723         dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring,
0724                   ap->evt_ring_dma);
0725         ap->evt_ring = NULL;
0726     }
0727     if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
0728         size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
0729         dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring,
0730                   ap->tx_ring_dma);
0731     }
0732     ap->tx_ring = NULL;
0733 
0734     if (ap->evt_prd != NULL) {
0735         dma_free_coherent(&ap->pdev->dev, sizeof(u32),
0736                   (void *)ap->evt_prd, ap->evt_prd_dma);
0737         ap->evt_prd = NULL;
0738     }
0739     if (ap->rx_ret_prd != NULL) {
0740         dma_free_coherent(&ap->pdev->dev, sizeof(u32),
0741                   (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
0742         ap->rx_ret_prd = NULL;
0743     }
0744     if (ap->tx_csm != NULL) {
0745         dma_free_coherent(&ap->pdev->dev, sizeof(u32),
0746                   (void *)ap->tx_csm, ap->tx_csm_dma);
0747         ap->tx_csm = NULL;
0748     }
0749 }
0750 
0751 
0752 static int ace_allocate_descriptors(struct net_device *dev)
0753 {
0754     struct ace_private *ap = netdev_priv(dev);
0755     int size;
0756 
0757     size = (sizeof(struct rx_desc) *
0758         (RX_STD_RING_ENTRIES +
0759          RX_JUMBO_RING_ENTRIES +
0760          RX_MINI_RING_ENTRIES +
0761          RX_RETURN_RING_ENTRIES));
0762 
0763     ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size,
0764                          &ap->rx_ring_base_dma, GFP_KERNEL);
0765     if (ap->rx_std_ring == NULL)
0766         goto fail;
0767 
0768     ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
0769     ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
0770     ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
0771 
0772     size = (sizeof(struct event) * EVT_RING_ENTRIES);
0773 
0774     ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size,
0775                       &ap->evt_ring_dma, GFP_KERNEL);
0776 
0777     if (ap->evt_ring == NULL)
0778         goto fail;
0779 
0780     /*
0781      * Only allocate a host TX ring for the Tigon II, the Tigon I
0782      * has to use PCI registers for this ;-(
0783      */
0784     if (!ACE_IS_TIGON_I(ap)) {
0785         size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
0786 
0787         ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size,
0788                          &ap->tx_ring_dma, GFP_KERNEL);
0789 
0790         if (ap->tx_ring == NULL)
0791             goto fail;
0792     }
0793 
0794     ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
0795                      &ap->evt_prd_dma, GFP_KERNEL);
0796     if (ap->evt_prd == NULL)
0797         goto fail;
0798 
0799     ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
0800                         &ap->rx_ret_prd_dma, GFP_KERNEL);
0801     if (ap->rx_ret_prd == NULL)
0802         goto fail;
0803 
0804     ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
0805                     &ap->tx_csm_dma, GFP_KERNEL);
0806     if (ap->tx_csm == NULL)
0807         goto fail;
0808 
0809     return 0;
0810 
0811 fail:
0812     /* Clean up. */
0813     ace_init_cleanup(dev);
0814     return 1;
0815 }
0816 
0817 
0818 /*
0819  * Generic cleanup handling data allocated during init. Used when the
0820  * module is unloaded or if an error occurs during initialization
0821  */
0822 static void ace_init_cleanup(struct net_device *dev)
0823 {
0824     struct ace_private *ap;
0825 
0826     ap = netdev_priv(dev);
0827 
0828     ace_free_descriptors(dev);
0829 
0830     if (ap->info)
0831         dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info),
0832                   ap->info, ap->info_dma);
0833     kfree(ap->skb);
0834     kfree(ap->trace_buf);
0835 
0836     if (dev->irq)
0837         free_irq(dev->irq, dev);
0838 
0839     iounmap(ap->regs);
0840 }
0841 
0842 
0843 /*
0844  * Commands are considered to be slow.
0845  */
0846 static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
0847 {
0848     u32 idx;
0849 
0850     idx = readl(&regs->CmdPrd);
0851 
0852     writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
0853     idx = (idx + 1) % CMD_RING_ENTRIES;
0854 
0855     writel(idx, &regs->CmdPrd);
0856 }
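
/*
 * Illustrative use (mirroring how the driver posts commands further down
 * in this file, e.g. when kicking the standard RX producer index on a
 * Tigon I):
 *
 *	struct cmd cmd;
 *
 *	cmd.evt = C_SET_RX_PRD_IDX;
 *	cmd.code = 0;
 *	cmd.idx = ap->rx_std_skbprd;
 *	ace_issue_cmd(regs, &cmd);
 */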
0857 
0858 
0859 static int ace_init(struct net_device *dev)
0860 {
0861     struct ace_private *ap;
0862     struct ace_regs __iomem *regs;
0863     struct ace_info *info = NULL;
0864     struct pci_dev *pdev;
0865     unsigned long myjif;
0866     u64 tmp_ptr;
0867     u32 tig_ver, mac1, mac2, tmp, pci_state;
0868     int board_idx, ecode = 0;
0869     short i;
0870     unsigned char cache_size;
0871     u8 addr[ETH_ALEN];
0872 
0873     ap = netdev_priv(dev);
0874     regs = ap->regs;
0875 
0876     board_idx = ap->board_idx;
0877 
0878     /*
0879      * aman@sgi.com - it's useful to do a NIC reset here to
0880      * address the `Firmware not running' problem subsequent
0881      * to any crashes involving the NIC
0882      */
0883     writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
0884     readl(&regs->HostCtrl);     /* PCI write posting */
0885     udelay(5);
0886 
0887     /*
0888      * Don't access any other registers before this point!
0889      */
0890 #ifdef __BIG_ENDIAN
0891     /*
0892      * This will most likely need BYTE_SWAP once we switch
0893      * to using __raw_writel()
0894      */
0895     writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
0896            &regs->HostCtrl);
0897 #else
0898     writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
0899            &regs->HostCtrl);
0900 #endif
0901     readl(&regs->HostCtrl);     /* PCI write posting */
0902 
0903     /*
0904      * Stop the NIC CPU and clear pending interrupts
0905      */
0906     writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
0907     readl(&regs->CpuCtrl);      /* PCI write posting */
0908     writel(0, &regs->Mb0Lo);
0909 
0910     tig_ver = readl(&regs->HostCtrl) >> 28;
0911 
0912     switch(tig_ver){
0913 #ifndef CONFIG_ACENIC_OMIT_TIGON_I
0914     case 4:
0915     case 5:
0916         printk(KERN_INFO "  Tigon I  (Rev. %i), Firmware: %i.%i.%i, ",
0917                tig_ver, ap->firmware_major, ap->firmware_minor,
0918                ap->firmware_fix);
0919         writel(0, &regs->LocalCtrl);
0920         ap->version = 1;
0921         ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
0922         break;
0923 #endif
0924     case 6:
0925         printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
0926                tig_ver, ap->firmware_major, ap->firmware_minor,
0927                ap->firmware_fix);
0928         writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
0929         readl(&regs->CpuBCtrl);     /* PCI write posting */
0930         /*
0931          * The SRAM bank size does _not_ indicate the amount
0932          * of memory on the card, it controls the _bank_ size!
0933          * Ie. a 1MB AceNIC will have two banks of 512KB.
0934          */
0935         writel(SRAM_BANK_512K, &regs->LocalCtrl);
0936         writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
0937         ap->version = 2;
0938         ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
0939         break;
0940     default:
0941         printk(KERN_WARNING "  Unsupported Tigon version detected "
0942                "(%i)\n", tig_ver);
0943         ecode = -ENODEV;
0944         goto init_error;
0945     }
0946 
0947     /*
0948      * ModeStat _must_ be set after the SRAM settings as this change
0949      * seems to corrupt the ModeStat and possibly other registers.
0950      * The SRAM settings survive resets and setting it to the same
0951      * value a second time works as well. This is what caused the
0952      * `Firmware not running' problem on the Tigon II.
0953      */
0954 #ifdef __BIG_ENDIAN
0955     writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
0956            ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
0957 #else
0958     writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
0959            ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
0960 #endif
0961     readl(&regs->ModeStat);     /* PCI write posting */
0962 
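    /*
     * Read eight bytes starting at EEPROM offset 0x8c: the first four go
     * into mac1 (handed to the NIC via MacAddrHi), the next four into
     * mac2 (MacAddrLo); the last six of the eight bytes form the
     * interface's MAC address assembled below.
     */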
0963     mac1 = 0;
0964     for(i = 0; i < 4; i++) {
0965         int t;
0966 
0967         mac1 = mac1 << 8;
0968         t = read_eeprom_byte(dev, 0x8c+i);
0969         if (t < 0) {
0970             ecode = -EIO;
0971             goto init_error;
0972         } else
0973             mac1 |= (t & 0xff);
0974     }
0975     mac2 = 0;
0976     for(i = 4; i < 8; i++) {
0977         int t;
0978 
0979         mac2 = mac2 << 8;
0980         t = read_eeprom_byte(dev, 0x8c+i);
0981         if (t < 0) {
0982             ecode = -EIO;
0983             goto init_error;
0984         } else
0985             mac2 |= (t & 0xff);
0986     }
0987 
0988     writel(mac1, &regs->MacAddrHi);
0989     writel(mac2, &regs->MacAddrLo);
0990 
0991     addr[0] = (mac1 >> 8) & 0xff;
0992     addr[1] = mac1 & 0xff;
0993     addr[2] = (mac2 >> 24) & 0xff;
0994     addr[3] = (mac2 >> 16) & 0xff;
0995     addr[4] = (mac2 >> 8) & 0xff;
0996     addr[5] = mac2 & 0xff;
0997     eth_hw_addr_set(dev, addr);
0998 
0999     printk("MAC: %pM\n", dev->dev_addr);
1000 
1001     /*
1002      * Looks like this is necessary to deal with on all architectures,
1003      * even this %$#%$# N440BX Intel based thing doesn't get it right.
1004      * Ie. having two NICs in the machine, one will have the cache
1005      * line set at boot time, the other will not.
1006      */
1007     pdev = ap->pdev;
1008     pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
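    /* The PCI cache line size register counts 32-bit words; convert it
     * to bytes before comparing with SMP_CACHE_BYTES. */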
1009     cache_size <<= 2;
1010     if (cache_size != SMP_CACHE_BYTES) {
1011         printk(KERN_INFO "  PCI cache line size set incorrectly "
1012                "(%i bytes) by BIOS/FW, ", cache_size);
1013         if (cache_size > SMP_CACHE_BYTES)
1014             printk("expecting %i\n", SMP_CACHE_BYTES);
1015         else {
1016             printk("correcting to %i\n", SMP_CACHE_BYTES);
1017             pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1018                           SMP_CACHE_BYTES >> 2);
1019         }
1020     }
1021 
1022     pci_state = readl(&regs->PciState);
1023     printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
1024            "latency: %i clks\n",
1025             (pci_state & PCI_32BIT) ? 32 : 64,
1026         (pci_state & PCI_66MHZ) ? 66 : 33,
1027         ap->pci_latency);
1028 
1029     /*
1030      * Set the max DMA transfer size. Seems that for most systems
1031      * the performance is better when no MAX parameter is
1032      * set. However for systems enabling PCI write and invalidate,
1033      * DMA writes must be set to the L1 cache line size to get
1034      * optimal performance.
1035      *
1036      * The default is now to turn the PCI write and invalidate off
1037      * - that is what Alteon does for NT.
1038      */
1039     tmp = READ_CMD_MEM | WRITE_CMD_MEM;
1040     if (ap->version >= 2) {
1041         tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
1042         /*
1043          * Tuning parameters only supported for 8 cards
1044          */
1045         if (board_idx == BOARD_IDX_OVERFLOW ||
1046             dis_pci_mem_inval[board_idx]) {
1047             if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1048                 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1049                 pci_write_config_word(pdev, PCI_COMMAND,
1050                               ap->pci_command);
1051                 printk(KERN_INFO "  Disabling PCI memory "
1052                        "write and invalidate\n");
1053             }
1054         } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
1055             printk(KERN_INFO "  PCI memory write & invalidate "
1056                    "enabled by BIOS, enabling counter measures\n");
1057 
1058             switch(SMP_CACHE_BYTES) {
1059             case 16:
1060                 tmp |= DMA_WRITE_MAX_16;
1061                 break;
1062             case 32:
1063                 tmp |= DMA_WRITE_MAX_32;
1064                 break;
1065             case 64:
1066                 tmp |= DMA_WRITE_MAX_64;
1067                 break;
1068             case 128:
1069                 tmp |= DMA_WRITE_MAX_128;
1070                 break;
1071             default:
1072                 printk(KERN_INFO "  Cache line size %i not "
1073                        "supported, PCI write and invalidate "
1074                        "disabled\n", SMP_CACHE_BYTES);
1075                 ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
1076                 pci_write_config_word(pdev, PCI_COMMAND,
1077                               ap->pci_command);
1078             }
1079         }
1080     }
1081 
1082 #ifdef __sparc__
1083     /*
1084      * On this platform, we know what the best dma settings
1085      * are.  We use 64-byte maximum bursts, because if we
1086      * burst larger than the cache line size (or even cross
1087      * a 64byte boundary in a single burst) the UltraSparc
1088      * PCI controller will disconnect at 64-byte multiples.
1089      *
1090      * Read-multiple will be properly enabled above, and when
1091      * set will give the PCI controller proper hints about
1092      * prefetching.
1093      */
1094     tmp &= ~DMA_READ_WRITE_MASK;
1095     tmp |= DMA_READ_MAX_64;
1096     tmp |= DMA_WRITE_MAX_64;
1097 #endif
1098 #ifdef __alpha__
1099     tmp &= ~DMA_READ_WRITE_MASK;
1100     tmp |= DMA_READ_MAX_128;
1101     /*
1102      * All the docs say MUST NOT. Well, I did.
1103      * Nothing terrible happens, if we load wrong size.
1104      * Bit w&i still works better!
1105      */
1106     tmp |= DMA_WRITE_MAX_128;
1107 #endif
1108     writel(tmp, &regs->PciState);
1109 
1110 #if 0
1111     /*
1112      * The Host PCI bus controller driver has to set FBB.
1113      * If all devices on that PCI bus support FBB, then the controller
1114      * can enable FBB support in the Host PCI Bus controller (or on
1115      * the PCI-PCI bridge if that applies).
1116      * -ggg
1117      */
1118     /*
1119      * I have received reports from people having problems when this
1120      * bit is enabled.
1121      */
1122     if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
1123         printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
1124         ap->pci_command |= PCI_COMMAND_FAST_BACK;
1125         pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
1126     }
1127 #endif
1128 
1129     /*
1130      * Configure DMA attributes.
1131      */
1132     if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1133         ecode = -ENODEV;
1134         goto init_error;
1135     }
1136 
1137     /*
1138      * Initialize the generic info block and the command+event rings
1139      * and the control blocks for the transmit and receive rings
1140      * as they need to be setup once and for all.
1141      */
1142     if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info),
1143                     &ap->info_dma, GFP_KERNEL))) {
1144         ecode = -EAGAIN;
1145         goto init_error;
1146     }
1147     ap->info = info;
1148 
1149     /*
1150      * Get the memory for the skb rings.
1151      */
1152     if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
1153         ecode = -EAGAIN;
1154         goto init_error;
1155     }
1156 
1157     ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
1158                 DRV_NAME, dev);
1159     if (ecode) {
1160         printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1161                DRV_NAME, pdev->irq);
1162         goto init_error;
1163     } else
1164         dev->irq = pdev->irq;
1165 
1166 #ifdef INDEX_DEBUG
1167     spin_lock_init(&ap->debug_lock);
1168     ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
1169     ap->last_std_rx = 0;
1170     ap->last_mini_rx = 0;
1171 #endif
1172 
1173     ecode = ace_load_firmware(dev);
1174     if (ecode)
1175         goto init_error;
1176 
1177     ap->fw_running = 0;
1178 
1179     tmp_ptr = ap->info_dma;
1180     writel(tmp_ptr >> 32, &regs->InfoPtrHi);
1181     writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
1182 
1183     memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
1184 
1185     set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
1186     info->evt_ctrl.flags = 0;
1187 
1188     *(ap->evt_prd) = 0;
1189     wmb();
1190     set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
1191     writel(0, &regs->EvtCsm);
1192 
1193     set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
1194     info->cmd_ctrl.flags = 0;
1195     info->cmd_ctrl.max_len = 0;
1196 
1197     for (i = 0; i < CMD_RING_ENTRIES; i++)
1198         writel(0, &regs->CmdRng[i]);
1199 
1200     writel(0, &regs->CmdPrd);
1201     writel(0, &regs->CmdCsm);
1202 
1203     tmp_ptr = ap->info_dma;
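    /* Hand-rolled offsetof(): point the NIC at the DMA address of the
     * statistics block embedded in struct ace_info (equivalent to
     * ap->info_dma + offsetof(struct ace_info, s.stats)). */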
1204     tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
1205     set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
1206 
1207     set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
1208     info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
1209     info->rx_std_ctrl.flags =
1210       RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1211 
1212     memset(ap->rx_std_ring, 0,
1213            RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
1214 
1215     for (i = 0; i < RX_STD_RING_ENTRIES; i++)
1216         ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
1217 
1218     ap->rx_std_skbprd = 0;
1219     atomic_set(&ap->cur_rx_bufs, 0);
1220 
1221     set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
1222             (ap->rx_ring_base_dma +
1223              (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
1224     info->rx_jumbo_ctrl.max_len = 0;
1225     info->rx_jumbo_ctrl.flags =
1226       RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1227 
1228     memset(ap->rx_jumbo_ring, 0,
1229            RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
1230 
1231     for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
1232         ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
1233 
1234     ap->rx_jumbo_skbprd = 0;
1235     atomic_set(&ap->cur_jumbo_bufs, 0);
1236 
1237     memset(ap->rx_mini_ring, 0,
1238            RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
1239 
1240     if (ap->version >= 2) {
1241         set_aceaddr(&info->rx_mini_ctrl.rngptr,
1242                 (ap->rx_ring_base_dma +
1243                  (sizeof(struct rx_desc) *
1244                   (RX_STD_RING_ENTRIES +
1245                    RX_JUMBO_RING_ENTRIES))));
1246         info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
1247         info->rx_mini_ctrl.flags =
1248           RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
1249 
1250         for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
1251             ap->rx_mini_ring[i].flags =
1252                 BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
1253     } else {
1254         set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
1255         info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
1256         info->rx_mini_ctrl.max_len = 0;
1257     }
1258 
1259     ap->rx_mini_skbprd = 0;
1260     atomic_set(&ap->cur_mini_bufs, 0);
1261 
1262     set_aceaddr(&info->rx_return_ctrl.rngptr,
1263             (ap->rx_ring_base_dma +
1264              (sizeof(struct rx_desc) *
1265               (RX_STD_RING_ENTRIES +
1266                RX_JUMBO_RING_ENTRIES +
1267                RX_MINI_RING_ENTRIES))));
1268     info->rx_return_ctrl.flags = 0;
1269     info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
1270 
1271     memset(ap->rx_return_ring, 0,
1272            RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
1273 
1274     set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
1275     *(ap->rx_ret_prd) = 0;
1276 
1277     writel(TX_RING_BASE, &regs->WinBase);
1278 
1279     if (ACE_IS_TIGON_I(ap)) {
1280         ap->tx_ring = (__force struct tx_desc *) regs->Window;
1281         for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
1282                  * sizeof(struct tx_desc)) / sizeof(u32); i++)
1283             writel(0, (__force void __iomem *)ap->tx_ring  + i * 4);
1284 
1285         set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
1286     } else {
1287         memset(ap->tx_ring, 0,
1288                MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
1289 
1290         set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
1291     }
1292 
1293     info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
1294     tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
1295 
1296     /*
1297      * The Tigon I does not like having the TX ring in host memory ;-(
1298      */
1299     if (!ACE_IS_TIGON_I(ap))
1300         tmp |= RCB_FLG_TX_HOST_RING;
1301 #if TX_COAL_INTS_ONLY
1302     tmp |= RCB_FLG_COAL_INT_ONLY;
1303 #endif
1304     info->tx_ctrl.flags = tmp;
1305 
1306     set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
1307 
1308     /*
1309      * Potential item for tuning parameter
1310      */
1311 #if 0 /* NO */
1312     writel(DMA_THRESH_16W, &regs->DmaReadCfg);
1313     writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
1314 #else
1315     writel(DMA_THRESH_8W, &regs->DmaReadCfg);
1316     writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
1317 #endif
1318 
1319     writel(0, &regs->MaskInt);
1320     writel(1, &regs->IfIdx);
1321 #if 0
1322     /*
1323      * McKinley boxes do not like us fiddling with AssistState
1324      * this early
1325      */
1326     writel(1, &regs->AssistState);
1327 #endif
1328 
1329     writel(DEF_STAT, &regs->TuneStatTicks);
1330     writel(DEF_TRACE, &regs->TuneTrace);
1331 
1332     ace_set_rxtx_parms(dev, 0);
1333 
1334     if (board_idx == BOARD_IDX_OVERFLOW) {
1335         printk(KERN_WARNING "%s: more than %i NICs detected, "
1336                "ignoring module parameters!\n",
1337                ap->name, ACE_MAX_MOD_PARMS);
1338     } else if (board_idx >= 0) {
1339         if (tx_coal_tick[board_idx])
1340             writel(tx_coal_tick[board_idx],
1341                    &regs->TuneTxCoalTicks);
1342         if (max_tx_desc[board_idx])
1343             writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
1344 
1345         if (rx_coal_tick[board_idx])
1346             writel(rx_coal_tick[board_idx],
1347                    &regs->TuneRxCoalTicks);
1348         if (max_rx_desc[board_idx])
1349             writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
1350 
1351         if (trace[board_idx])
1352             writel(trace[board_idx], &regs->TuneTrace);
1353 
1354         if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
1355             writel(tx_ratio[board_idx], &regs->TxBufRat);
1356     }
1357 
1358     /*
1359      * Default link parameters
1360      */
1361     tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
1362         LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
1363     if(ap->version >= 2)
1364         tmp |= LNK_TX_FLOW_CTL_Y;
1365 
1366     /*
1367      * Override link default parameters
1368      */
1369     if ((board_idx >= 0) && link_state[board_idx]) {
1370         int option = link_state[board_idx];
1371 
1372         tmp = LNK_ENABLE;
1373 
1374         if (option & 0x01) {
1375             printk(KERN_INFO "%s: Setting half duplex link\n",
1376                    ap->name);
1377             tmp &= ~LNK_FULL_DUPLEX;
1378         }
1379         if (option & 0x02)
1380             tmp &= ~LNK_NEGOTIATE;
1381         if (option & 0x10)
1382             tmp |= LNK_10MB;
1383         if (option & 0x20)
1384             tmp |= LNK_100MB;
1385         if (option & 0x40)
1386             tmp |= LNK_1000MB;
1387         if ((option & 0x70) == 0) {
1388             printk(KERN_WARNING "%s: No media speed specified, "
1389                    "forcing auto negotiation\n", ap->name);
1390             tmp |= LNK_NEGOTIATE | LNK_1000MB |
1391                 LNK_100MB | LNK_10MB;
1392         }
1393         if ((option & 0x100) == 0)
1394             tmp |= LNK_NEG_FCTL;
1395         else
1396             printk(KERN_INFO "%s: Disabling flow control "
1397                    "negotiation\n", ap->name);
1398         if (option & 0x200)
1399             tmp |= LNK_RX_FLOW_CTL_Y;
1400         if ((option & 0x400) && (ap->version >= 2)) {
1401             printk(KERN_INFO "%s: Enabling TX flow control\n",
1402                    ap->name);
1403             tmp |= LNK_TX_FLOW_CTL_Y;
1404         }
1405     }
1406 
1407     ap->link = tmp;
1408     writel(tmp, &regs->TuneLink);
1409     if (ap->version >= 2)
1410         writel(tmp, &regs->TuneFastLink);
1411 
1412     writel(ap->firmware_start, &regs->Pc);
1413 
1414     writel(0, &regs->Mb0Lo);
1415 
1416     /*
1417      * Set tx_csm before we start receiving interrupts, otherwise
1418      * the interrupt handler might think it is supposed to process
1419      * tx ints before we are up and running, which may cause a null
1420      * pointer access in the int handler.
1421      */
1422     ap->cur_rx = 0;
1423     ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
1424 
1425     wmb();
1426     ace_set_txprd(regs, ap, 0);
1427     writel(0, &regs->RxRetCsm);
1428 
1429     /*
1430      * Enable DMA engine now.
1431      * If we do this sooner, Mckinley box pukes.
1432      * I assume it's because Tigon II DMA engine wants to check
1433      * *something* even before the CPU is started.
1434      */
1435     writel(1, &regs->AssistState);  /* enable DMA */
1436 
1437     /*
1438      * Start the NIC CPU
1439      */
1440     writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
1441     readl(&regs->CpuCtrl);
1442 
1443     /*
1444      * Wait for the firmware to spin up - max 3 seconds.
1445      */
1446     myjif = jiffies + 3 * HZ;
1447     while (time_before(jiffies, myjif) && !ap->fw_running)
1448         cpu_relax();
1449 
1450     if (!ap->fw_running) {
1451         printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
1452 
1453         ace_dump_trace(ap);
1454         writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
1455         readl(&regs->CpuCtrl);
1456 
1457         /* aman@sgi.com - account for badly behaving firmware/NIC:
1458          * - have observed that the NIC may continue to generate
1459          *   interrupts for some reason; attempt to stop it - halt
1460          *   second CPU for Tigon II cards, and also clear Mb0
1461          * - if we're a module, we'll fail to load if this was
1462          *   the only GbE card in the system => if the kernel does
1463          *   see an interrupt from the NIC, code to handle it is
1464          *   gone and OOps! - so free_irq also
1465          */
1466         if (ap->version >= 2)
1467             writel(readl(&regs->CpuBCtrl) | CPU_HALT,
1468                    &regs->CpuBCtrl);
1469         writel(0, &regs->Mb0Lo);
1470         readl(&regs->Mb0Lo);
1471 
1472         ecode = -EBUSY;
1473         goto init_error;
1474     }
1475 
1476     /*
1477      * We load the ring here as there seems to be no way to tell the
1478      * firmware to wipe the ring without re-initializing it.
1479      */
1480     if (!test_and_set_bit(0, &ap->std_refill_busy))
1481         ace_load_std_rx_ring(dev, RX_RING_SIZE);
1482     else
1483         printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
1484                ap->name);
1485     if (ap->version >= 2) {
1486         if (!test_and_set_bit(0, &ap->mini_refill_busy))
1487             ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
1488         else
1489             printk(KERN_ERR "%s: Someone is busy refilling "
1490                    "the RX mini ring\n", ap->name);
1491     }
1492     return 0;
1493 
1494  init_error:
1495     ace_init_cleanup(dev);
1496     return ecode;
1497 }
1498 
1499 
1500 static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
1501 {
1502     struct ace_private *ap = netdev_priv(dev);
1503     struct ace_regs __iomem *regs = ap->regs;
1504     int board_idx = ap->board_idx;
1505 
1506     if (board_idx >= 0) {
1507         if (!jumbo) {
1508             if (!tx_coal_tick[board_idx])
1509                 writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
1510             if (!max_tx_desc[board_idx])
1511                 writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
1512             if (!rx_coal_tick[board_idx])
1513                 writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
1514             if (!max_rx_desc[board_idx])
1515                 writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
1516             if (!tx_ratio[board_idx])
1517                 writel(DEF_TX_RATIO, &regs->TxBufRat);
1518         } else {
1519             if (!tx_coal_tick[board_idx])
1520                 writel(DEF_JUMBO_TX_COAL,
1521                        &regs->TuneTxCoalTicks);
1522             if (!max_tx_desc[board_idx])
1523                 writel(DEF_JUMBO_TX_MAX_DESC,
1524                        &regs->TuneMaxTxDesc);
1525             if (!rx_coal_tick[board_idx])
1526                 writel(DEF_JUMBO_RX_COAL,
1527                        &regs->TuneRxCoalTicks);
1528             if (!max_rx_desc[board_idx])
1529                 writel(DEF_JUMBO_RX_MAX_DESC,
1530                        &regs->TuneMaxRxDesc);
1531             if (!tx_ratio[board_idx])
1532                 writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
1533         }
1534     }
1535 }
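
/*
 * Note that ace_set_rxtx_parms() above only installs the built-in
 * defaults, and only for the tuning registers whose per-board module
 * parameter (tx_coal_tick[], max_tx_desc[], rx_coal_tick[],
 * max_rx_desc[], tx_ratio[]) is zero for this board; a non-zero entry
 * suppresses the corresponding default write so that a user-supplied
 * value (presumably programmed elsewhere during init) is not
 * overwritten.
 */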
1536 
1537 
1538 static void ace_watchdog(struct net_device *data, unsigned int txqueue)
1539 {
1540     struct net_device *dev = data;
1541     struct ace_private *ap = netdev_priv(dev);
1542     struct ace_regs __iomem *regs = ap->regs;
1543 
1544     /*
1545      * We haven't received a stats update event for more than 2.5
1546      * seconds and there is data in the transmit queue, thus we
1547      * assume the card is stuck.
1548      */
1549     if (*ap->tx_csm != ap->tx_ret_csm) {
1550         printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
1551                dev->name, (unsigned int)readl(&regs->HostCtrl));
1552         /* This can happen due to ieee flow control. */
1553     } else {
1554         printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
1555                dev->name);
1556 #if 0
1557         netif_wake_queue(dev);
1558 #endif
1559     }
1560 }
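
/*
 * ace_watchdog() above is invoked by the networking core through the
 * ndo_tx_timeout hook once the transmit queue has been stopped for
 * longer than dev->watchdog_timeo.  A minimal sketch of the usual
 * wiring (illustrative only; the real registration lives elsewhere in
 * this driver and the timeout value below is just an example):
 */
#if 0
static const struct net_device_ops example_netdev_ops = {
    .ndo_tx_timeout = ace_watchdog,
    /* ... remaining ops ... */
};

    /* at probe time: */
    dev->netdev_ops = &example_netdev_ops;
    dev->watchdog_timeo = 5 * HZ;   /* example value only */
#endif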
1561 
1562 
1563 static void ace_tasklet(struct tasklet_struct *t)
1564 {
1565     struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
1566     struct net_device *dev = ap->ndev;
1567     int cur_size;
1568 
1569     cur_size = atomic_read(&ap->cur_rx_bufs);
1570     if ((cur_size < RX_LOW_STD_THRES) &&
1571         !test_and_set_bit(0, &ap->std_refill_busy)) {
1572 #ifdef DEBUG
1573         printk("refilling buffers (current %i)\n", cur_size);
1574 #endif
1575         ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
1576     }
1577 
1578     if (ap->version >= 2) {
1579         cur_size = atomic_read(&ap->cur_mini_bufs);
1580         if ((cur_size < RX_LOW_MINI_THRES) &&
1581             !test_and_set_bit(0, &ap->mini_refill_busy)) {
1582 #ifdef DEBUG
1583             printk("refilling mini buffers (current %i)\n",
1584                    cur_size);
1585 #endif
1586             ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
1587         }
1588     }
1589 
1590     cur_size = atomic_read(&ap->cur_jumbo_bufs);
1591     if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
1592         !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
1593 #ifdef DEBUG
1594         printk("refilling jumbo buffers (current %i)\n", cur_size);
1595 #endif
1596         ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
1597     }
1598     ap->tasklet_pending = 0;
1599 }
1600 
1601 
1602 /*
1603  * Copy the contents of the NIC's trace buffer to kernel memory.
1604  */
1605 static void ace_dump_trace(struct ace_private *ap)
1606 {
1607 #if 0
1608     if (!ap->trace_buf)
1609         if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
1610             return;
1611 #endif
1612 }
1613 
1614 
1615 /*
1616  * Load the standard rx ring.
1617  *
1618  * Loading rings is safe without holding the spin lock since this is
1619  * done only before the device is enabled, thus no interrupts are
1620  * generated and the interrupt handler/tasklet handler cannot run concurrently.
1621  */
1622 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
1623 {
1624     struct ace_private *ap = netdev_priv(dev);
1625     struct ace_regs __iomem *regs = ap->regs;
1626     short i, idx;
1627 
1628 
1629     prefetchw(&ap->cur_rx_bufs);
1630 
1631     idx = ap->rx_std_skbprd;
1632 
1633     for (i = 0; i < nr_bufs; i++) {
1634         struct sk_buff *skb;
1635         struct rx_desc *rd;
1636         dma_addr_t mapping;
1637 
1638         skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
1639         if (!skb)
1640             break;
1641 
1642         mapping = dma_map_page(&ap->pdev->dev,
1643                        virt_to_page(skb->data),
1644                        offset_in_page(skb->data),
1645                        ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
1646         ap->skb->rx_std_skbuff[idx].skb = skb;
1647         dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1648                    mapping, mapping);
1649 
1650         rd = &ap->rx_std_ring[idx];
1651         set_aceaddr(&rd->addr, mapping);
1652         rd->size = ACE_STD_BUFSIZE;
1653         rd->idx = idx;
1654         idx = (idx + 1) % RX_STD_RING_ENTRIES;
1655     }
1656 
1657     if (!i)
1658         goto error_out;
1659 
1660     atomic_add(i, &ap->cur_rx_bufs);
1661     ap->rx_std_skbprd = idx;
1662 
1663     if (ACE_IS_TIGON_I(ap)) {
1664         struct cmd cmd;
1665         cmd.evt = C_SET_RX_PRD_IDX;
1666         cmd.code = 0;
1667         cmd.idx = ap->rx_std_skbprd;
1668         ace_issue_cmd(regs, &cmd);
1669     } else {
1670         writel(idx, &regs->RxStdPrd);
1671         wmb();
1672     }
1673 
1674  out:
1675     clear_bit(0, &ap->std_refill_busy);
1676     return;
1677 
1678  error_out:
1679     printk(KERN_INFO "Out of memory when allocating "
1680            "standard receive buffers\n");
1681     goto out;
1682 }
1683 
1684 
1685 static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
1686 {
1687     struct ace_private *ap = netdev_priv(dev);
1688     struct ace_regs __iomem *regs = ap->regs;
1689     short i, idx;
1690 
1691     prefetchw(&ap->cur_mini_bufs);
1692 
1693     idx = ap->rx_mini_skbprd;
1694     for (i = 0; i < nr_bufs; i++) {
1695         struct sk_buff *skb;
1696         struct rx_desc *rd;
1697         dma_addr_t mapping;
1698 
1699         skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
1700         if (!skb)
1701             break;
1702 
1703         mapping = dma_map_page(&ap->pdev->dev,
1704                        virt_to_page(skb->data),
1705                        offset_in_page(skb->data),
1706                        ACE_MINI_BUFSIZE, DMA_FROM_DEVICE);
1707         ap->skb->rx_mini_skbuff[idx].skb = skb;
1708         dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1709                    mapping, mapping);
1710 
1711         rd = &ap->rx_mini_ring[idx];
1712         set_aceaddr(&rd->addr, mapping);
1713         rd->size = ACE_MINI_BUFSIZE;
1714         rd->idx = idx;
1715         idx = (idx + 1) % RX_MINI_RING_ENTRIES;
1716     }
1717 
1718     if (!i)
1719         goto error_out;
1720 
1721     atomic_add(i, &ap->cur_mini_bufs);
1722 
1723     ap->rx_mini_skbprd = idx;
1724 
1725     writel(idx, &regs->RxMiniPrd);
1726     wmb();
1727 
1728  out:
1729     clear_bit(0, &ap->mini_refill_busy);
1730     return;
1731  error_out:
1732     printk(KERN_INFO "Out of memory when allocating "
1733            "mini receive buffers\n");
1734     goto out;
1735 }
1736 
1737 
1738 /*
1739  * Load the jumbo rx ring, this may happen at any time if the MTU
1740  * is changed to a value > 1500.
1741  */
1742 static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
1743 {
1744     struct ace_private *ap = netdev_priv(dev);
1745     struct ace_regs __iomem *regs = ap->regs;
1746     short i, idx;
1747 
1748     idx = ap->rx_jumbo_skbprd;
1749 
1750     for (i = 0; i < nr_bufs; i++) {
1751         struct sk_buff *skb;
1752         struct rx_desc *rd;
1753         dma_addr_t mapping;
1754 
1755         skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
1756         if (!skb)
1757             break;
1758 
1759         mapping = dma_map_page(&ap->pdev->dev,
1760                        virt_to_page(skb->data),
1761                        offset_in_page(skb->data),
1762                        ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
1763         ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1764         dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1765                    mapping, mapping);
1766 
1767         rd = &ap->rx_jumbo_ring[idx];
1768         set_aceaddr(&rd->addr, mapping);
1769         rd->size = ACE_JUMBO_BUFSIZE;
1770         rd->idx = idx;
1771         idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
1772     }
1773 
1774     if (!i)
1775         goto error_out;
1776 
1777     atomic_add(i, &ap->cur_jumbo_bufs);
1778     ap->rx_jumbo_skbprd = idx;
1779 
1780     if (ACE_IS_TIGON_I(ap)) {
1781         struct cmd cmd;
1782         cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1783         cmd.code = 0;
1784         cmd.idx = ap->rx_jumbo_skbprd;
1785         ace_issue_cmd(regs, &cmd);
1786     } else {
1787         writel(idx, &regs->RxJumboPrd);
1788         wmb();
1789     }
1790 
1791  out:
1792     clear_bit(0, &ap->jumbo_refill_busy);
1793     return;
1794  error_out:
1795     if (net_ratelimit())
1796         printk(KERN_INFO "Out of memory when allocating "
1797                "jumbo receive buffers\n");
1798     goto out;
1799 }
1800 
1801 
1802 /*
1803  * All events are considered to be slow (RX/TX ints do not generate
1804  * events) and are handled here, outside the main interrupt handler,
1805  * to reduce the size of the handler.
1806  */
1807 static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
1808 {
1809     struct ace_private *ap;
1810 
1811     ap = netdev_priv(dev);
1812 
1813     while (evtcsm != evtprd) {
1814         switch (ap->evt_ring[evtcsm].evt) {
1815         case E_FW_RUNNING:
1816             printk(KERN_INFO "%s: Firmware up and running\n",
1817                    ap->name);
1818             ap->fw_running = 1;
1819             wmb();
1820             break;
1821         case E_STATS_UPDATED:
1822             break;
1823         case E_LNK_STATE:
1824         {
1825             u16 code = ap->evt_ring[evtcsm].code;
1826             switch (code) {
1827             case E_C_LINK_UP:
1828             {
1829                 u32 state = readl(&ap->regs->GigLnkState);
1830                 printk(KERN_WARNING "%s: Optical link UP "
1831                        "(%s Duplex, Flow Control: %s%s)\n",
1832                        ap->name,
1833                        state & LNK_FULL_DUPLEX ? "Full":"Half",
1834                        state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
1835                        state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
1836                 break;
1837             }
1838             case E_C_LINK_DOWN:
1839                 printk(KERN_WARNING "%s: Optical link DOWN\n",
1840                        ap->name);
1841                 break;
1842             case E_C_LINK_10_100:
1843                 printk(KERN_WARNING "%s: 10/100BaseT link "
1844                        "UP\n", ap->name);
1845                 break;
1846             default:
1847                 printk(KERN_ERR "%s: Unknown optical link "
1848                        "state %02x\n", ap->name, code);
1849             }
1850             break;
1851         }
1852         case E_ERROR:
1853             switch(ap->evt_ring[evtcsm].code) {
1854             case E_C_ERR_INVAL_CMD:
1855                 printk(KERN_ERR "%s: invalid command error\n",
1856                        ap->name);
1857                 break;
1858             case E_C_ERR_UNIMP_CMD:
1859                 printk(KERN_ERR "%s: unimplemented command "
1860                        "error\n", ap->name);
1861                 break;
1862             case E_C_ERR_BAD_CFG:
1863                 printk(KERN_ERR "%s: bad config error\n",
1864                        ap->name);
1865                 break;
1866             default:
1867                 printk(KERN_ERR "%s: unknown error %02x\n",
1868                        ap->name, ap->evt_ring[evtcsm].code);
1869             }
1870             break;
1871         case E_RESET_JUMBO_RNG:
1872         {
1873             int i;
1874             for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
1875                 if (ap->skb->rx_jumbo_skbuff[i].skb) {
1876                     ap->rx_jumbo_ring[i].size = 0;
1877                     set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
1878                     dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
1879                     ap->skb->rx_jumbo_skbuff[i].skb = NULL;
1880                 }
1881             }
1882 
1883             if (ACE_IS_TIGON_I(ap)) {
1884                 struct cmd cmd;
1885                 cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
1886                 cmd.code = 0;
1887                 cmd.idx = 0;
1888                 ace_issue_cmd(ap->regs, &cmd);
1889             } else {
1890                 writel(0, &((ap->regs)->RxJumboPrd));
1891                 wmb();
1892             }
1893 
1894             ap->jumbo = 0;
1895             ap->rx_jumbo_skbprd = 0;
1896             printk(KERN_INFO "%s: Jumbo ring flushed\n",
1897                    ap->name);
1898             clear_bit(0, &ap->jumbo_refill_busy);
1899             break;
1900         }
1901         default:
1902             printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
1903                    ap->name, ap->evt_ring[evtcsm].evt);
1904         }
1905         evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
1906     }
1907 
1908     return evtcsm;
1909 }
1910 
1911 
1912 static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
1913 {
1914     struct ace_private *ap = netdev_priv(dev);
1915     u32 idx;
1916     int mini_count = 0, std_count = 0;
1917 
1918     idx = rxretcsm;
1919 
1920     prefetchw(&ap->cur_rx_bufs);
1921     prefetchw(&ap->cur_mini_bufs);
1922 
1923     while (idx != rxretprd) {
1924         struct ring_info *rip;
1925         struct sk_buff *skb;
1926         struct rx_desc *retdesc;
1927         u32 skbidx;
1928         int bd_flags, desc_type, mapsize;
1929         u16 csum;
1930 
1931 
1932         /* make sure the rx descriptor isn't read before rxretprd */
1933         if (idx == rxretcsm)
1934             rmb();
1935 
1936         retdesc = &ap->rx_return_ring[idx];
1937         skbidx = retdesc->idx;
1938         bd_flags = retdesc->flags;
1939         desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
1940 
1941         switch(desc_type) {
1942             /*
1943              * Normal frames do not have any flags set
1944              *
1945              * Mini and normal frames arrive frequently,
1946              * so use a local counter to avoid doing
1947              * atomic operations for each packet arriving.
1948              */
1949         case 0:
1950             rip = &ap->skb->rx_std_skbuff[skbidx];
1951             mapsize = ACE_STD_BUFSIZE;
1952             std_count++;
1953             break;
1954         case BD_FLG_JUMBO:
1955             rip = &ap->skb->rx_jumbo_skbuff[skbidx];
1956             mapsize = ACE_JUMBO_BUFSIZE;
1957             atomic_dec(&ap->cur_jumbo_bufs);
1958             break;
1959         case BD_FLG_MINI:
1960             rip = &ap->skb->rx_mini_skbuff[skbidx];
1961             mapsize = ACE_MINI_BUFSIZE;
1962             mini_count++;
1963             break;
1964         default:
1965             printk(KERN_INFO "%s: unknown frame type (0x%02x) "
1966                    "returned by NIC\n", dev->name,
1967                    retdesc->flags);
1968             goto error;
1969         }
1970 
1971         skb = rip->skb;
1972         rip->skb = NULL;
1973         dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
1974                    mapsize, DMA_FROM_DEVICE);
1975         skb_put(skb, retdesc->size);
1976 
1977         /*
1978          * Fly baby, fly!
1979          */
1980         csum = retdesc->tcp_udp_csum;
1981 
1982         skb->protocol = eth_type_trans(skb, dev);
1983 
1984         /*
1985          * Instead of forcing the poor tigon mips cpu to calculate
1986          * pseudo hdr checksum, we do this ourselves.
1987          */
1988         if (bd_flags & BD_FLG_TCP_UDP_SUM) {
1989             skb->csum = htons(csum);
1990             skb->ip_summed = CHECKSUM_COMPLETE;
1991         } else {
1992             skb_checksum_none_assert(skb);
1993         }
1994 
1995         /* send it up */
1996         if ((bd_flags & BD_FLG_VLAN_TAG))
1997             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
1998         netif_rx(skb);
1999 
2000         dev->stats.rx_packets++;
2001         dev->stats.rx_bytes += retdesc->size;
2002 
2003         idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
2004     }
2005 
2006     atomic_sub(std_count, &ap->cur_rx_bufs);
2007     if (!ACE_IS_TIGON_I(ap))
2008         atomic_sub(mini_count, &ap->cur_mini_bufs);
2009 
2010  out:
2011     /*
2012      * According to the documentation RxRetCsm is obsolete with
2013      * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
2014      */
2015     if (ACE_IS_TIGON_I(ap)) {
2016         writel(idx, &ap->regs->RxRetCsm);
2017     }
2018     ap->cur_rx = idx;
2019 
2020     return;
2021  error:
2022     idx = rxretprd;
2023     goto out;
2024 }
2025 
2026 
2027 static inline void ace_tx_int(struct net_device *dev,
2028                   u32 txcsm, u32 idx)
2029 {
2030     struct ace_private *ap = netdev_priv(dev);
2031 
2032     do {
2033         struct sk_buff *skb;
2034         struct tx_ring_info *info;
2035 
2036         info = ap->skb->tx_skbuff + idx;
2037         skb = info->skb;
2038 
2039         if (dma_unmap_len(info, maplen)) {
2040             dma_unmap_page(&ap->pdev->dev,
2041                        dma_unmap_addr(info, mapping),
2042                        dma_unmap_len(info, maplen),
2043                        DMA_TO_DEVICE);
2044             dma_unmap_len_set(info, maplen, 0);
2045         }
2046 
2047         if (skb) {
2048             dev->stats.tx_packets++;
2049             dev->stats.tx_bytes += skb->len;
2050             dev_consume_skb_irq(skb);
2051             info->skb = NULL;
2052         }
2053 
2054         idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2055     } while (idx != txcsm);
2056 
2057     if (netif_queue_stopped(dev))
2058         netif_wake_queue(dev);
2059 
2060     wmb();
2061     ap->tx_ret_csm = txcsm;
2062 
2063     /* So... tx_ret_csm is advanced _after_ check for device wakeup.
2064      *
2065      * We could try to make it before. In this case we would get
2066      * the following race condition: hard_start_xmit on other cpu
2067      * enters after we advanced tx_ret_csm and fills space,
2068      * which we have just freed, so that we make illegal device wakeup.
2069      * There is no good way to work around this (the check at entry
2070      * to ace_start_xmit detects this condition and prevents
2071      * ring corruption, but it is not a good workaround.)
2072      *
2073      * When tx_ret_csm is advanced after, we wake up device _only_
2074      * if we really have some space in ring (though the core doing
2075      * hard_start_xmit can see full ring for some period and has to
2076      * synchronize.) Superb.
2077      * BUT! We get another subtle race condition. hard_start_xmit
2078      * may think that ring is full between wakeup and advancing
2079      * tx_ret_csm and will stop device instantly! It is not so bad.
2080      * We are guaranteed that there is something in ring, so that
2081      * the next irq will resume transmission. To speedup this we could
2082      * mark descriptor, which closes ring with BD_FLG_COAL_NOW
2083      * (see ace_start_xmit).
2084      *
2085      * Well, this dilemma exists in all lock-free devices.
2086      * We, following the scheme used in drivers by Donald Becker,
2087      * select the least dangerous.
2088      *                          --ANK
2089      */
2090 }
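
/*
 * Illustrative timeline for the ordering chosen above (sketch only):
 *
 *   chosen order                     rejected order
 *   ------------                     --------------
 *   netif_wake_queue()               ap->tx_ret_csm = txcsm
 *   ap->tx_ret_csm = txcsm           (other CPU refills the space
 *                                     that was just released)
 *                                    netif_wake_queue()  <- wakes the
 *                                     queue against a ring that is
 *                                     already full again
 *
 * With the chosen order the worst case is that ace_start_xmit() still
 * sees a full ring right after the wakeup and stops the queue again;
 * the next interrupt then resumes transmission.
 */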
2091 
2092 
2093 static irqreturn_t ace_interrupt(int irq, void *dev_id)
2094 {
2095     struct net_device *dev = (struct net_device *)dev_id;
2096     struct ace_private *ap = netdev_priv(dev);
2097     struct ace_regs __iomem *regs = ap->regs;
2098     u32 idx;
2099     u32 txcsm, rxretcsm, rxretprd;
2100     u32 evtcsm, evtprd;
2101 
2102     /*
2103      * In case of PCI shared interrupts or spurious interrupts,
2104      * we want to make sure it is actually our interrupt before
2105      * spending any time in here.
2106      */
2107     if (!(readl(&regs->HostCtrl) & IN_INT))
2108         return IRQ_NONE;
2109 
2110     /*
2111      * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
2112      * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
2113      * writel(0, &regs->Mb0Lo).
2114      *
2115      * "IRQ avoidance" recommended in docs applies to IRQs served
2116      * by threads and it is wrong even for that case.
2117      */
2118     writel(0, &regs->Mb0Lo);
2119     readl(&regs->Mb0Lo);
2120 
2121     /*
2122      * There is no conflict between transmit handling in
2123      * start_xmit and receive processing, thus there is no reason
2124      * to take a spin lock for RX handling. Wait until we start
2125      * working on the other stuff - hey we don't need a spin lock
2126      * anymore.
2127      */
2128     rxretprd = *ap->rx_ret_prd;
2129     rxretcsm = ap->cur_rx;
2130 
2131     if (rxretprd != rxretcsm)
2132         ace_rx_int(dev, rxretprd, rxretcsm);
2133 
2134     txcsm = *ap->tx_csm;
2135     idx = ap->tx_ret_csm;
2136 
2137     if (txcsm != idx) {
2138         /*
2139          * If each skb takes only one descriptor this check degenerates
2140          * to identity, because new space has just been opened.
2141          * But if skbs are fragmented we must check that this index
2142          * update releases enough space, otherwise we just
2143          * wait for the device to complete more work.
2144          */
2145         if (!tx_ring_full(ap, txcsm, ap->tx_prd))
2146             ace_tx_int(dev, txcsm, idx);
2147     }
2148 
2149     evtcsm = readl(&regs->EvtCsm);
2150     evtprd = *ap->evt_prd;
2151 
2152     if (evtcsm != evtprd) {
2153         evtcsm = ace_handle_event(dev, evtcsm, evtprd);
2154         writel(evtcsm, &regs->EvtCsm);
2155     }
2156 
2157     /*
2158      * This has to go last in the interrupt handler and run with
2159      * the spin lock released ... what lock?
2160      */
2161     if (netif_running(dev)) {
2162         int cur_size;
2163         int run_tasklet = 0;
2164 
2165         cur_size = atomic_read(&ap->cur_rx_bufs);
2166         if (cur_size < RX_LOW_STD_THRES) {
2167             if ((cur_size < RX_PANIC_STD_THRES) &&
2168                 !test_and_set_bit(0, &ap->std_refill_busy)) {
2169 #ifdef DEBUG
2170                 printk("low on std buffers %i\n", cur_size);
2171 #endif
2172                 ace_load_std_rx_ring(dev,
2173                              RX_RING_SIZE - cur_size);
2174             } else
2175                 run_tasklet = 1;
2176         }
2177 
2178         if (!ACE_IS_TIGON_I(ap)) {
2179             cur_size = atomic_read(&ap->cur_mini_bufs);
2180             if (cur_size < RX_LOW_MINI_THRES) {
2181                 if ((cur_size < RX_PANIC_MINI_THRES) &&
2182                     !test_and_set_bit(0,
2183                               &ap->mini_refill_busy)) {
2184 #ifdef DEBUG
2185                     printk("low on mini buffers %i\n",
2186                            cur_size);
2187 #endif
2188                     ace_load_mini_rx_ring(dev,
2189                                   RX_MINI_SIZE - cur_size);
2190                 } else
2191                     run_tasklet = 1;
2192             }
2193         }
2194 
2195         if (ap->jumbo) {
2196             cur_size = atomic_read(&ap->cur_jumbo_bufs);
2197             if (cur_size < RX_LOW_JUMBO_THRES) {
2198                 if ((cur_size < RX_PANIC_JUMBO_THRES) &&
2199                     !test_and_set_bit(0,
2200                               &ap->jumbo_refill_busy)){
2201 #ifdef DEBUG
2202                     printk("low on jumbo buffers %i\n",
2203                            cur_size);
2204 #endif
2205                     ace_load_jumbo_rx_ring(dev,
2206                                    RX_JUMBO_SIZE - cur_size);
2207                 } else
2208                     run_tasklet = 1;
2209             }
2210         }
2211         if (run_tasklet && !ap->tasklet_pending) {
2212             ap->tasklet_pending = 1;
2213             tasklet_schedule(&ap->ace_tasklet);
2214         }
2215     }
2216 
2217     return IRQ_HANDLED;
2218 }
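
/*
 * The IN_INT test at the top of ace_interrupt() is what makes the
 * handler safe on a shared PCI interrupt line: when the line fires on
 * behalf of another device we return IRQ_NONE immediately.  A sketch
 * of how such a handler is registered for a shared line (illustrative
 * only; the driver does its own registration during init):
 */
#if 0
    if (request_irq(dev->irq, ace_interrupt, IRQF_SHARED,
            dev->name, dev))
        printk(KERN_WARNING "%s: unable to get IRQ %d (shared)\n",
               dev->name, dev->irq);
#endif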
2219 
2220 static int ace_open(struct net_device *dev)
2221 {
2222     struct ace_private *ap = netdev_priv(dev);
2223     struct ace_regs __iomem *regs = ap->regs;
2224     struct cmd cmd;
2225 
2226     if (!(ap->fw_running)) {
2227         printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
2228         return -EBUSY;
2229     }
2230 
2231     writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
2232 
2233     cmd.evt = C_CLEAR_STATS;
2234     cmd.code = 0;
2235     cmd.idx = 0;
2236     ace_issue_cmd(regs, &cmd);
2237 
2238     cmd.evt = C_HOST_STATE;
2239     cmd.code = C_C_STACK_UP;
2240     cmd.idx = 0;
2241     ace_issue_cmd(regs, &cmd);
2242 
2243     if (ap->jumbo &&
2244         !test_and_set_bit(0, &ap->jumbo_refill_busy))
2245         ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2246 
2247     if (dev->flags & IFF_PROMISC) {
2248         cmd.evt = C_SET_PROMISC_MODE;
2249         cmd.code = C_C_PROMISC_ENABLE;
2250         cmd.idx = 0;
2251         ace_issue_cmd(regs, &cmd);
2252 
2253         ap->promisc = 1;
2254     } else
2255         ap->promisc = 0;
2256     ap->mcast_all = 0;
2257 
2258 #if 0
2259     cmd.evt = C_LNK_NEGOTIATION;
2260     cmd.code = 0;
2261     cmd.idx = 0;
2262     ace_issue_cmd(regs, &cmd);
2263 #endif
2264 
2265     netif_start_queue(dev);
2266 
2267     /*
2268      * Setup the bottom half rx ring refill handler
2269      */
2270     tasklet_setup(&ap->ace_tasklet, ace_tasklet);
2271     return 0;
2272 }
2273 
2274 
2275 static int ace_close(struct net_device *dev)
2276 {
2277     struct ace_private *ap = netdev_priv(dev);
2278     struct ace_regs __iomem *regs = ap->regs;
2279     struct cmd cmd;
2280     unsigned long flags;
2281     short i;
2282 
2283     /*
2284      * Without (or before) releasing irq and stopping hardware, this
2285      * is absolute nonsense, by the way. It will be reset instantly
2286      * by the first irq.
2287      */
2288     netif_stop_queue(dev);
2289 
2290 
2291     if (ap->promisc) {
2292         cmd.evt = C_SET_PROMISC_MODE;
2293         cmd.code = C_C_PROMISC_DISABLE;
2294         cmd.idx = 0;
2295         ace_issue_cmd(regs, &cmd);
2296         ap->promisc = 0;
2297     }
2298 
2299     cmd.evt = C_HOST_STATE;
2300     cmd.code = C_C_STACK_DOWN;
2301     cmd.idx = 0;
2302     ace_issue_cmd(regs, &cmd);
2303 
2304     tasklet_kill(&ap->ace_tasklet);
2305 
2306     /*
2307      * Make sure one CPU is not processing packets while
2308      * buffers are being released by another.
2309      */
2310 
2311     local_irq_save(flags);
2312     ace_mask_irq(dev);
2313 
2314     for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2315         struct sk_buff *skb;
2316         struct tx_ring_info *info;
2317 
2318         info = ap->skb->tx_skbuff + i;
2319         skb = info->skb;
2320 
2321         if (dma_unmap_len(info, maplen)) {
2322             if (ACE_IS_TIGON_I(ap)) {
2323                 /* NB: TIGON_1 is special, tx_ring is in io space */
2324                 struct tx_desc __iomem *tx;
2325                 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
2326                 writel(0, &tx->addr.addrhi);
2327                 writel(0, &tx->addr.addrlo);
2328                 writel(0, &tx->flagsize);
2329             } else
2330                 memset(ap->tx_ring + i, 0,
2331                        sizeof(struct tx_desc));
2332             dma_unmap_page(&ap->pdev->dev,
2333                        dma_unmap_addr(info, mapping),
2334                        dma_unmap_len(info, maplen),
2335                        DMA_TO_DEVICE);
2336             dma_unmap_len_set(info, maplen, 0);
2337         }
2338         if (skb) {
2339             dev_kfree_skb(skb);
2340             info->skb = NULL;
2341         }
2342     }
2343 
2344     if (ap->jumbo) {
2345         cmd.evt = C_RESET_JUMBO_RNG;
2346         cmd.code = 0;
2347         cmd.idx = 0;
2348         ace_issue_cmd(regs, &cmd);
2349     }
2350 
2351     ace_unmask_irq(dev);
2352     local_irq_restore(flags);
2353 
2354     return 0;
2355 }
2356 
2357 
2358 static inline dma_addr_t
2359 ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2360            struct sk_buff *tail, u32 idx)
2361 {
2362     dma_addr_t mapping;
2363     struct tx_ring_info *info;
2364 
2365     mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
2366                    offset_in_page(skb->data), skb->len,
2367                    DMA_TO_DEVICE);
2368 
2369     info = ap->skb->tx_skbuff + idx;
2370     info->skb = tail;
2371     dma_unmap_addr_set(info, mapping, mapping);
2372     dma_unmap_len_set(info, maplen, skb->len);
2373     return mapping;
2374 }
2375 
2376 
2377 static inline void
2378 ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
2379            u32 flagsize, u32 vlan_tag)
2380 {
2381 #if !USE_TX_COAL_NOW
2382     flagsize &= ~BD_FLG_COAL_NOW;
2383 #endif
2384 
2385     if (ACE_IS_TIGON_I(ap)) {
2386         struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
2387         writel(addr >> 32, &io->addr.addrhi);
2388         writel(addr & 0xffffffff, &io->addr.addrlo);
2389         writel(flagsize, &io->flagsize);
2390         writel(vlan_tag, &io->vlanres);
2391     } else {
2392         desc->addr.addrhi = addr >> 32;
2393         desc->addr.addrlo = addr;
2394         desc->flagsize = flagsize;
2395         desc->vlanres = vlan_tag;
2396     }
2397 }
2398 
2399 
2400 static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
2401                   struct net_device *dev)
2402 {
2403     struct ace_private *ap = netdev_priv(dev);
2404     struct ace_regs __iomem *regs = ap->regs;
2405     struct tx_desc *desc;
2406     u32 idx, flagsize;
2407     unsigned long maxjiff = jiffies + 3*HZ;
2408 
2409 restart:
2410     idx = ap->tx_prd;
2411 
2412     if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2413         goto overflow;
2414 
2415     if (!skb_shinfo(skb)->nr_frags) {
2416         dma_addr_t mapping;
2417         u32 vlan_tag = 0;
2418 
2419         mapping = ace_map_tx_skb(ap, skb, skb, idx);
2420         flagsize = (skb->len << 16) | (BD_FLG_END);
2421         if (skb->ip_summed == CHECKSUM_PARTIAL)
2422             flagsize |= BD_FLG_TCP_UDP_SUM;
2423         if (skb_vlan_tag_present(skb)) {
2424             flagsize |= BD_FLG_VLAN_TAG;
2425             vlan_tag = skb_vlan_tag_get(skb);
2426         }
2427         desc = ap->tx_ring + idx;
2428         idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2429 
2430         /* Look at ace_tx_int for explanations. */
2431         if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2432             flagsize |= BD_FLG_COAL_NOW;
2433 
2434         ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2435     } else {
2436         dma_addr_t mapping;
2437         u32 vlan_tag = 0;
2438         int i, len = 0;
2439 
2440         mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2441         flagsize = (skb_headlen(skb) << 16);
2442         if (skb->ip_summed == CHECKSUM_PARTIAL)
2443             flagsize |= BD_FLG_TCP_UDP_SUM;
2444         if (skb_vlan_tag_present(skb)) {
2445             flagsize |= BD_FLG_VLAN_TAG;
2446             vlan_tag = skb_vlan_tag_get(skb);
2447         }
2448 
2449         ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
2450 
2451         idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2452 
2453         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2454             const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2455             struct tx_ring_info *info;
2456 
2457             len += skb_frag_size(frag);
2458             info = ap->skb->tx_skbuff + idx;
2459             desc = ap->tx_ring + idx;
2460 
2461             mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
2462                            skb_frag_size(frag),
2463                            DMA_TO_DEVICE);
2464 
2465             flagsize = skb_frag_size(frag) << 16;
2466             if (skb->ip_summed == CHECKSUM_PARTIAL)
2467                 flagsize |= BD_FLG_TCP_UDP_SUM;
2468             idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2469 
2470             if (i == skb_shinfo(skb)->nr_frags - 1) {
2471                 flagsize |= BD_FLG_END;
2472                 if (tx_ring_full(ap, ap->tx_ret_csm, idx))
2473                     flagsize |= BD_FLG_COAL_NOW;
2474 
2475                 /*
2476                  * Only the last fragment frees
2477                  * the skb!
2478                  */
2479                 info->skb = skb;
2480             } else {
2481                 info->skb = NULL;
2482             }
2483             dma_unmap_addr_set(info, mapping, mapping);
2484             dma_unmap_len_set(info, maplen, skb_frag_size(frag));
2485             ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2486         }
2487     }
2488 
2489     wmb();
2490     ap->tx_prd = idx;
2491     ace_set_txprd(regs, ap, idx);
2492 
2493     if (flagsize & BD_FLG_COAL_NOW) {
2494         netif_stop_queue(dev);
2495 
2496         /*
2497          * The interrupt handler may have completed descriptors in
2498          * between, making the ring free again. Since xmit is
2499          * serialized, this is the only situation we have to
2500          * re-test.
2501          */
2502         if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
2503             netif_wake_queue(dev);
2504     }
2505 
2506     return NETDEV_TX_OK;
2507 
2508 overflow:
2509     /*
2510      * This race condition is unavoidable with lock-free drivers.
2511      * We wake up the queue _before_ tx_ret_csm is advanced, so that we can
2512      * enter hard_start_xmit too early, while tx ring still looks closed.
2513      * This happens ~1-4 times per 100000 packets, so it is cheap enough to
2514      * loop here, syncing with the other CPU. Probably, we need an additional
2515      * wmb() in ace_tx_intr as well.
2516      *
2517      * Note that this race is relieved by reserving one more entry
2518      * in the tx ring than is strictly necessary (see the original non-SG driver).
2519      * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which
2520      * is already overkill.
2521      *
2522      * The alternative is to return busy without throttling the queue; in
2523      * that case the loop only becomes longer, with no added benefit.
2524      */
2525     if (time_before(jiffies, maxjiff)) {
2526         barrier();
2527         cpu_relax();
2528         goto restart;
2529     }
2530 
2531     /* The ring is stuck full. */
2532     printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
2533     return NETDEV_TX_BUSY;
2534 }
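
/*
 * Descriptor accounting for ace_start_xmit() above (sketch): a linear
 * skb consumes a single descriptor, while a fragmented skb consumes
 * one descriptor for the linear head plus one per fragment - e.g. an
 * skb with three fragments occupies four ring entries - and only the
 * entry holding the last fragment keeps the skb pointer, so the buffer
 * is freed exactly once in ace_tx_int().
 */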
2535 
2536 
2537 static int ace_change_mtu(struct net_device *dev, int new_mtu)
2538 {
2539     struct ace_private *ap = netdev_priv(dev);
2540     struct ace_regs __iomem *regs = ap->regs;
2541 
2542     writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
2543     dev->mtu = new_mtu;
2544 
2545     if (new_mtu > ACE_STD_MTU) {
2546         if (!(ap->jumbo)) {
2547             printk(KERN_INFO "%s: Enabling Jumbo frame "
2548                    "support\n", dev->name);
2549             ap->jumbo = 1;
2550             if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
2551                 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
2552             ace_set_rxtx_parms(dev, 1);
2553         }
2554     } else {
2555         while (test_and_set_bit(0, &ap->jumbo_refill_busy));
2556         ace_sync_irq(dev->irq);
2557         ace_set_rxtx_parms(dev, 0);
2558         if (ap->jumbo) {
2559             struct cmd cmd;
2560 
2561             cmd.evt = C_RESET_JUMBO_RNG;
2562             cmd.code = 0;
2563             cmd.idx = 0;
2564             ace_issue_cmd(regs, &cmd);
2565         }
2566     }
2567 
2568     return 0;
2569 }
2570 
2571 static int ace_get_link_ksettings(struct net_device *dev,
2572                   struct ethtool_link_ksettings *cmd)
2573 {
2574     struct ace_private *ap = netdev_priv(dev);
2575     struct ace_regs __iomem *regs = ap->regs;
2576     u32 link;
2577     u32 supported;
2578 
2579     memset(cmd, 0, sizeof(struct ethtool_link_ksettings));
2580 
2581     supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2582              SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2583              SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
2584              SUPPORTED_Autoneg | SUPPORTED_FIBRE);
2585 
2586     cmd->base.port = PORT_FIBRE;
2587 
2588     link = readl(&regs->GigLnkState);
2589     if (link & LNK_1000MB) {
2590         cmd->base.speed = SPEED_1000;
2591     } else {
2592         link = readl(&regs->FastLnkState);
2593         if (link & LNK_100MB)
2594             cmd->base.speed = SPEED_100;
2595         else if (link & LNK_10MB)
2596             cmd->base.speed = SPEED_10;
2597         else
2598             cmd->base.speed = 0;
2599     }
2600     if (link & LNK_FULL_DUPLEX)
2601         cmd->base.duplex = DUPLEX_FULL;
2602     else
2603         cmd->base.duplex = DUPLEX_HALF;
2604 
2605     if (link & LNK_NEGOTIATE)
2606         cmd->base.autoneg = AUTONEG_ENABLE;
2607     else
2608         cmd->base.autoneg = AUTONEG_DISABLE;
2609 
2610 #if 0
2611     /*
2612      * Current struct ethtool_cmd is insufficient
2613      */
2614     ecmd->trace = readl(&regs->TuneTrace);
2615 
2616     ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
2617     ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
2618 #endif
2619 
2620     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2621                         supported);
2622 
2623     return 0;
2624 }
2625 
2626 static int ace_set_link_ksettings(struct net_device *dev,
2627                   const struct ethtool_link_ksettings *cmd)
2628 {
2629     struct ace_private *ap = netdev_priv(dev);
2630     struct ace_regs __iomem *regs = ap->regs;
2631     u32 link, speed;
2632 
2633     link = readl(&regs->GigLnkState);
2634     if (link & LNK_1000MB)
2635         speed = SPEED_1000;
2636     else {
2637         link = readl(&regs->FastLnkState);
2638         if (link & LNK_100MB)
2639             speed = SPEED_100;
2640         else if (link & LNK_10MB)
2641             speed = SPEED_10;
2642         else
2643             speed = SPEED_100;
2644     }
2645 
2646     link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
2647         LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
2648     if (!ACE_IS_TIGON_I(ap))
2649         link |= LNK_TX_FLOW_CTL_Y;
2650     if (cmd->base.autoneg == AUTONEG_ENABLE)
2651         link |= LNK_NEGOTIATE;
2652     if (cmd->base.speed != speed) {
2653         link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
2654         switch (cmd->base.speed) {
2655         case SPEED_1000:
2656             link |= LNK_1000MB;
2657             break;
2658         case SPEED_100:
2659             link |= LNK_100MB;
2660             break;
2661         case SPEED_10:
2662             link |= LNK_10MB;
2663             break;
2664         }
2665     }
2666 
2667     if (cmd->base.duplex == DUPLEX_FULL)
2668         link |= LNK_FULL_DUPLEX;
2669 
2670     if (link != ap->link) {
2671         struct cmd cmd;
2672         printk(KERN_INFO "%s: Renegotiating link state\n",
2673                dev->name);
2674 
2675         ap->link = link;
2676         writel(link, &regs->TuneLink);
2677         if (!ACE_IS_TIGON_I(ap))
2678             writel(link, &regs->TuneFastLink);
2679         wmb();
2680 
2681         cmd.evt = C_LNK_NEGOTIATION;
2682         cmd.code = 0;
2683         cmd.idx = 0;
2684         ace_issue_cmd(regs, &cmd);
2685     }
2686     return 0;
2687 }
2688 
2689 static void ace_get_drvinfo(struct net_device *dev,
2690                 struct ethtool_drvinfo *info)
2691 {
2692     struct ace_private *ap = netdev_priv(dev);
2693 
2694     strlcpy(info->driver, "acenic", sizeof(info->driver));
2695     snprintf(info->fw_version, sizeof(info->fw_version), "%i.%i.%i",
2696          ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
2697 
2698     if (ap->pdev)
2699         strlcpy(info->bus_info, pci_name(ap->pdev),
2700             sizeof(info->bus_info));
2701 
2702 }
2703 
2704 /*
2705  * Set the hardware MAC address.
2706  */
2707 static int ace_set_mac_addr(struct net_device *dev, void *p)
2708 {
2709     struct ace_private *ap = netdev_priv(dev);
2710     struct ace_regs __iomem *regs = ap->regs;
2711     struct sockaddr *addr=p;
2712     const u8 *da;
2713     struct cmd cmd;
2714 
2715     if(netif_running(dev))
2716         return -EBUSY;
2717 
2718     eth_hw_addr_set(dev, addr->sa_data);
2719 
2720     da = (const u8 *)dev->dev_addr;
2721 
2722     writel(da[0] << 8 | da[1], &regs->MacAddrHi);
2723     writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
2724            &regs->MacAddrLo);
2725 
2726     cmd.evt = C_SET_MAC_ADDR;
2727     cmd.code = 0;
2728     cmd.idx = 0;
2729     ace_issue_cmd(regs, &cmd);
2730 
2731     return 0;
2732 }
2733 
2734 
2735 static void ace_set_multicast_list(struct net_device *dev)
2736 {
2737     struct ace_private *ap = netdev_priv(dev);
2738     struct ace_regs __iomem *regs = ap->regs;
2739     struct cmd cmd;
2740 
2741     if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
2742         cmd.evt = C_SET_MULTICAST_MODE;
2743         cmd.code = C_C_MCAST_ENABLE;
2744         cmd.idx = 0;
2745         ace_issue_cmd(regs, &cmd);
2746         ap->mcast_all = 1;
2747     } else if (ap->mcast_all) {
2748         cmd.evt = C_SET_MULTICAST_MODE;
2749         cmd.code = C_C_MCAST_DISABLE;
2750         cmd.idx = 0;
2751         ace_issue_cmd(regs, &cmd);
2752         ap->mcast_all = 0;
2753     }
2754 
2755     if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
2756         cmd.evt = C_SET_PROMISC_MODE;
2757         cmd.code = C_C_PROMISC_ENABLE;
2758         cmd.idx = 0;
2759         ace_issue_cmd(regs, &cmd);
2760         ap->promisc = 1;
2761     } else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
2762         cmd.evt = C_SET_PROMISC_MODE;
2763         cmd.code = C_C_PROMISC_DISABLE;
2764         cmd.idx = 0;
2765         ace_issue_cmd(regs, &cmd);
2766         ap->promisc = 0;
2767     }
2768 
2769     /*
2770      * For the time being multicast relies on the upper layers
2771      * filtering it properly. The Firmware does not allow one to
2772      * set the entire multicast list at a time and keeping track of
2773      * it here is going to be messy.
2774      */
2775     if (!netdev_mc_empty(dev) && !ap->mcast_all) {
2776         cmd.evt = C_SET_MULTICAST_MODE;
2777         cmd.code = C_C_MCAST_ENABLE;
2778         cmd.idx = 0;
2779         ace_issue_cmd(regs, &cmd);
2780     } else if (!ap->mcast_all) {
2781         cmd.evt = C_SET_MULTICAST_MODE;
2782         cmd.code = C_C_MCAST_DISABLE;
2783         cmd.idx = 0;
2784         ace_issue_cmd(regs, &cmd);
2785     }
2786 }
2787 
2788 
2789 static struct net_device_stats *ace_get_stats(struct net_device *dev)
2790 {
2791     struct ace_private *ap = netdev_priv(dev);
2792     struct ace_mac_stats __iomem *mac_stats =
2793         (struct ace_mac_stats __iomem *)ap->regs->Stats;
2794 
2795     dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
2796     dev->stats.multicast = readl(&mac_stats->kept_mc);
2797     dev->stats.collisions = readl(&mac_stats->coll);
2798 
2799     return &dev->stats;
2800 }
2801 
2802 
2803 static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
2804              u32 dest, int size)
2805 {
2806     void __iomem *tdest;
2807     short tsize, i;
2808 
2809     if (size <= 0)
2810         return;
2811 
2812     while (size > 0) {
2813         tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2814                 min_t(u32, size, ACE_WINDOW_SIZE));
2815         tdest = (void __iomem *) &regs->Window +
2816             (dest & (ACE_WINDOW_SIZE - 1));
2817         writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2818         for (i = 0; i < (tsize / 4); i++) {
2819             /* Firmware is big-endian */
2820             writel(be32_to_cpup(src), tdest);
2821             src++;
2822             tdest += 4;
2823             dest += 4;
2824             size -= 4;
2825         }
2826     }
2827 }
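
/*
 * Worked example for the window arithmetic in ace_copy()/ace_clear()
 * (numbers are illustrative; ACE_WINDOW_SIZE is assumed to be a power
 * of two, e.g. 0x800): with dest == 0x47f0,
 *
 *   ~dest & (ACE_WINDOW_SIZE - 1)        == 0x00f
 *   (~dest & (ACE_WINDOW_SIZE - 1)) + 1  == 0x010
 *
 * so at most 16 bytes - exactly the distance to the end of the current
 * window at 0x4800 - are copied before WinBase is re-programmed and
 * the copy continues in the next window.
 */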
2828 
2829 
2830 static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
2831 {
2832     void __iomem *tdest;
2833     short tsize = 0, i;
2834 
2835     if (size <= 0)
2836         return;
2837 
2838     while (size > 0) {
2839         tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
2840                 min_t(u32, size, ACE_WINDOW_SIZE));
2841         tdest = (void __iomem *) &regs->Window +
2842             (dest & (ACE_WINDOW_SIZE - 1));
2843         writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
2844 
2845         for (i = 0; i < (tsize / 4); i++) {
2846             writel(0, tdest + i*4);
2847         }
2848 
2849         dest += tsize;
2850         size -= tsize;
2851     }
2852 }
2853 
2854 
2855 /*
2856  * Download the firmware into the SRAM on the NIC
2857  *
2858  * This operation requires the NIC to be halted and is performed with
2859  * interrupts disabled and with the spinlock held.
2860  */
2861 static int ace_load_firmware(struct net_device *dev)
2862 {
2863     const struct firmware *fw;
2864     const char *fw_name = "acenic/tg2.bin";
2865     struct ace_private *ap = netdev_priv(dev);
2866     struct ace_regs __iomem *regs = ap->regs;
2867     const __be32 *fw_data;
2868     u32 load_addr;
2869     int ret;
2870 
2871     if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
2872         printk(KERN_ERR "%s: trying to download firmware while the "
2873                "CPU is running!\n", ap->name);
2874         return -EFAULT;
2875     }
2876 
2877     if (ACE_IS_TIGON_I(ap))
2878         fw_name = "acenic/tg1.bin";
2879 
2880     ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
2881     if (ret) {
2882         printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
2883                ap->name, fw_name);
2884         return ret;
2885     }
2886 
2887     fw_data = (void *)fw->data;
2888 
2889     /* Firmware blob starts with version numbers, followed by
2890        load and start address. Remainder is the blob to be loaded
2891        contiguously from load address. We don't bother to represent
2892        the BSS/SBSS sections any more, since we were clearing the
2893        whole thing anyway. */
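
    /*
     * Rough image layout implied by the parsing below (byte offsets;
     * sketch only):
     *
     *   fw->data[0..2]            firmware version major/minor/fix
     *   fw_data[1]  (bytes 4-7)   big-endian firmware start address
     *   fw_data[2]  (bytes 8-11)  big-endian load address
     *   fw_data[3]... (byte 12+)  image copied to the load address
     */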
2894     ap->firmware_major = fw->data[0];
2895     ap->firmware_minor = fw->data[1];
2896     ap->firmware_fix = fw->data[2];
2897 
2898     ap->firmware_start = be32_to_cpu(fw_data[1]);
2899     if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
2900         printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2901                ap->name, ap->firmware_start, fw_name);
2902         ret = -EINVAL;
2903         goto out;
2904     }
2905 
2906     load_addr = be32_to_cpu(fw_data[2]);
2907     if (load_addr < 0x4000 || load_addr >= 0x80000) {
2908         printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
2909                ap->name, load_addr, fw_name);
2910         ret = -EINVAL;
2911         goto out;
2912     }
2913 
2914     /*
2915      * Do not try to clear more than 512KiB or we end up seeing
2916      * funny things on NICs with only 512KiB SRAM
2917      */
2918     ace_clear(regs, 0x2000, 0x80000-0x2000);
2919     ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
2920  out:
2921     release_firmware(fw);
2922     return ret;
2923 }
2924 
2925 
2926 /*
2927  * The eeprom on the AceNIC is an Atmel i2c EEPROM.
2928  *
2929  * Accessing the EEPROM is `interesting' to say the least - don't read
2930  * this code right after dinner.
2931  *
2932  * This is all about black magic and bit-banging the device .... I
2933  * wonder in what hospital they have put the guy who designed the i2c
2934  * specs.
2935  *
2936  * Oh yes, this is only the beginning!
2937  *
2938  * Thanks to Stevarino Webinski for helping track down the bugs in the
2939  * i2c readout code by beta testing all my hacks.
2940  */
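
/*
 * For orientation, the read transaction implemented by the helpers
 * below is roughly (sketch derived from read_eeprom_byte()):
 *
 *   eeprom_start()                      i2c START condition
 *   eeprom_prep(EEPROM_WRITE_SELECT)    select device, write mode
 *   eeprom_prep(offset >> 8)            address high byte
 *   eeprom_prep(offset & 0xff)          address low byte
 *   eeprom_start()                      repeated START
 *   eeprom_prep(EEPROM_READ_SELECT)     select device, read mode
 *   <clock in 8 data bits>              sampled mid high-clock
 *   eeprom_stop()                       i2c STOP condition
 *
 * with eeprom_check_ack() consulted after every eeprom_prep().
 */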
2941 static void eeprom_start(struct ace_regs __iomem *regs)
2942 {
2943     u32 local;
2944 
2945     readl(&regs->LocalCtrl);
2946     udelay(ACE_SHORT_DELAY);
2947     local = readl(&regs->LocalCtrl);
2948     local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
2949     writel(local, &regs->LocalCtrl);
2950     readl(&regs->LocalCtrl);
2951     mb();
2952     udelay(ACE_SHORT_DELAY);
2953     local |= EEPROM_CLK_OUT;
2954     writel(local, &regs->LocalCtrl);
2955     readl(&regs->LocalCtrl);
2956     mb();
2957     udelay(ACE_SHORT_DELAY);
2958     local &= ~EEPROM_DATA_OUT;
2959     writel(local, &regs->LocalCtrl);
2960     readl(&regs->LocalCtrl);
2961     mb();
2962     udelay(ACE_SHORT_DELAY);
2963     local &= ~EEPROM_CLK_OUT;
2964     writel(local, &regs->LocalCtrl);
2965     readl(&regs->LocalCtrl);
2966     mb();
2967 }
2968 
2969 
2970 static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
2971 {
2972     short i;
2973     u32 local;
2974 
2975     udelay(ACE_SHORT_DELAY);
2976     local = readl(&regs->LocalCtrl);
2977     local &= ~EEPROM_DATA_OUT;
2978     local |= EEPROM_WRITE_ENABLE;
2979     writel(local, &regs->LocalCtrl);
2980     readl(&regs->LocalCtrl);
2981     mb();
2982 
2983     for (i = 0; i < 8; i++, magic <<= 1) {
2984         udelay(ACE_SHORT_DELAY);
2985         if (magic & 0x80)
2986             local |= EEPROM_DATA_OUT;
2987         else
2988             local &= ~EEPROM_DATA_OUT;
2989         writel(local, &regs->LocalCtrl);
2990         readl(&regs->LocalCtrl);
2991         mb();
2992 
2993         udelay(ACE_SHORT_DELAY);
2994         local |= EEPROM_CLK_OUT;
2995         writel(local, &regs->LocalCtrl);
2996         readl(&regs->LocalCtrl);
2997         mb();
2998         udelay(ACE_SHORT_DELAY);
2999         local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
3000         writel(local, &regs->LocalCtrl);
3001         readl(&regs->LocalCtrl);
3002         mb();
3003     }
3004 }
3005 
3006 
3007 static int eeprom_check_ack(struct ace_regs __iomem *regs)
3008 {
3009     int state;
3010     u32 local;
3011 
3012     local = readl(&regs->LocalCtrl);
3013     local &= ~EEPROM_WRITE_ENABLE;
3014     writel(local, &regs->LocalCtrl);
3015     readl(&regs->LocalCtrl);
3016     mb();
3017     udelay(ACE_LONG_DELAY);
3018     local |= EEPROM_CLK_OUT;
3019     writel(local, &regs->LocalCtrl);
3020     readl(&regs->LocalCtrl);
3021     mb();
3022     udelay(ACE_SHORT_DELAY);
3023     /* sample data in middle of high clk */
3024     state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
3025     udelay(ACE_SHORT_DELAY);
3026     mb();
3027     writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3028     readl(&regs->LocalCtrl);
3029     mb();
3030 
3031     return state;
3032 }
3033 
3034 
3035 static void eeprom_stop(struct ace_regs __iomem *regs)
3036 {
3037     u32 local;
3038 
3039     udelay(ACE_SHORT_DELAY);
3040     local = readl(&regs->LocalCtrl);
3041     local |= EEPROM_WRITE_ENABLE;
3042     writel(local, &regs->LocalCtrl);
3043     readl(&regs->LocalCtrl);
3044     mb();
3045     udelay(ACE_SHORT_DELAY);
3046     local &= ~EEPROM_DATA_OUT;
3047     writel(local, &regs->LocalCtrl);
3048     readl(&regs->LocalCtrl);
3049     mb();
3050     udelay(ACE_SHORT_DELAY);
3051     local |= EEPROM_CLK_OUT;
3052     writel(local, &regs->LocalCtrl);
3053     readl(&regs->LocalCtrl);
3054     mb();
3055     udelay(ACE_SHORT_DELAY);
3056     local |= EEPROM_DATA_OUT;
3057     writel(local, &regs->LocalCtrl);
3058     readl(&regs->LocalCtrl);
3059     mb();
3060     udelay(ACE_LONG_DELAY);
3061     local &= ~EEPROM_CLK_OUT;
3062     writel(local, &regs->LocalCtrl);
3063     mb();
3064 }
3065 
3066 
3067 /*
3068  * Read a whole byte from the EEPROM.
3069  */
3070 static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
3071 {
3072     struct ace_private *ap = netdev_priv(dev);
3073     struct ace_regs __iomem *regs = ap->regs;
3074     unsigned long flags;
3075     u32 local;
3076     int result = 0;
3077     short i;
3078 
3079     /*
3080      * Don't take interrupts on this CPU while bit banging
3081      * the %#%#@$ I2C device
3082      */
3083     local_irq_save(flags);
3084 
3085     eeprom_start(regs);
3086 
3087     eeprom_prep(regs, EEPROM_WRITE_SELECT);
3088     if (eeprom_check_ack(regs)) {
3089         local_irq_restore(flags);
3090         printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
3091         result = -EIO;
3092         goto eeprom_read_error;
3093     }
3094 
3095     eeprom_prep(regs, (offset >> 8) & 0xff);
3096     if (eeprom_check_ack(regs)) {
3097         local_irq_restore(flags);
3098         printk(KERN_ERR "%s: Unable to set address byte 0\n",
3099                ap->name);
3100         result = -EIO;
3101         goto eeprom_read_error;
3102     }
3103 
3104     eeprom_prep(regs, offset & 0xff);
3105     if (eeprom_check_ack(regs)) {
3106         local_irq_restore(flags);
3107         printk(KERN_ERR "%s: Unable to set address byte 1\n",
3108                ap->name);
3109         result = -EIO;
3110         goto eeprom_read_error;
3111     }
3112 
3113     eeprom_start(regs);
3114     eeprom_prep(regs, EEPROM_READ_SELECT);
3115     if (eeprom_check_ack(regs)) {
3116         local_irq_restore(flags);
3117         printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
3118                ap->name);
3119         result = -EIO;
3120         goto eeprom_read_error;
3121     }
3122 
3123     for (i = 0; i < 8; i++) {
3124         local = readl(&regs->LocalCtrl);
3125         local &= ~EEPROM_WRITE_ENABLE;
3126         writel(local, &regs->LocalCtrl);
3127         readl(&regs->LocalCtrl);
3128         udelay(ACE_LONG_DELAY);
3129         mb();
3130         local |= EEPROM_CLK_OUT;
3131         writel(local, &regs->LocalCtrl);
3132         readl(&regs->LocalCtrl);
3133         mb();
3134         udelay(ACE_SHORT_DELAY);
3135         /* sample data mid high clk */
3136         result = (result << 1) |
3137             ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
3138         udelay(ACE_SHORT_DELAY);
3139         mb();
3140         local = readl(&regs->LocalCtrl);
3141         local &= ~EEPROM_CLK_OUT;
3142         writel(local, &regs->LocalCtrl);
3143         readl(&regs->LocalCtrl);
3144         udelay(ACE_SHORT_DELAY);
3145         mb();
3146         if (i == 7) {
3147             local |= EEPROM_WRITE_ENABLE;
3148             writel(local, &regs->LocalCtrl);
3149             readl(&regs->LocalCtrl);
3150             mb();
3151             udelay(ACE_SHORT_DELAY);
3152         }
3153     }
3154 
3155     local |= EEPROM_DATA_OUT;
3156     writel(local, &regs->LocalCtrl);
3157     readl(&regs->LocalCtrl);
3158     mb();
3159     udelay(ACE_SHORT_DELAY);
3160     writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
3161     readl(&regs->LocalCtrl);
3162     udelay(ACE_LONG_DELAY);
3163     writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
3164     readl(&regs->LocalCtrl);
3165     mb();
3166     udelay(ACE_SHORT_DELAY);
3167     eeprom_stop(regs);
3168 
3169     local_irq_restore(flags);
3170  out:
3171     return result;
3172 
3173  eeprom_read_error:
3174     printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
3175            ap->name, offset);
3176     goto out;
3177 }
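
/*
 * Usage sketch for read_eeprom_byte() (illustrative only; the offset
 * below is a placeholder - the real EEPROM layout is defined elsewhere
 * in the driver):
 */
#if 0
    u8 mac[ETH_ALEN];
    int i;

    for (i = 0; i < ETH_ALEN; i++)
        mac[i] = read_eeprom_byte(dev, example_mac_offset + i);
#endif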
3178 
3179 module_pci_driver(acenic_pci_driver);