0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
0004  *
0005  * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
0006  *
0007  * Thanks to Essential Communication for providing us with hardware
0008  * and very comprehensive documentation without which I would not have
0009  * been able to write this driver. A special thank you to John Gibbon
0010  * for sorting out the legal issues, with the NDA, allowing the code to
0011  * be released under the GPL.
0012  *
0013  * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
0014  * stupid bugs in my code.
0015  *
0016  * Softnet support and various other patches from Val Henson of
0017  * ODS/Essential.
0018  *
0019  * PCI DMA mapping code partly based on work by Francois Romieu.
0020  */
0021 
0022 
0023 #define DEBUG 1
0024 #define RX_DMA_SKBUFF 1
0025 #define PKT_COPY_THRESHOLD 512
0026 
0027 #include <linux/module.h>
0028 #include <linux/types.h>
0029 #include <linux/errno.h>
0030 #include <linux/ioport.h>
0031 #include <linux/pci.h>
0032 #include <linux/kernel.h>
0033 #include <linux/netdevice.h>
0034 #include <linux/hippidevice.h>
0035 #include <linux/skbuff.h>
0036 #include <linux/delay.h>
0037 #include <linux/mm.h>
0038 #include <linux/slab.h>
0039 #include <net/sock.h>
0040 
0041 #include <asm/cache.h>
0042 #include <asm/byteorder.h>
0043 #include <asm/io.h>
0044 #include <asm/irq.h>
0045 #include <linux/uaccess.h>
0046 
0047 #define rr_if_busy(dev)     netif_queue_stopped(dev)
0048 #define rr_if_running(dev)  netif_running(dev)
0049 
0050 #include "rrunner.h"
0051 
0052 #define RUN_AT(x) (jiffies + (x))
0053 
0054 
0055 MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
0056 MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
0057 MODULE_LICENSE("GPL");
0058 
0059 static const char version[] =
0060 "rrunner.c: v0.50 11/11/2002  Jes Sorensen (jes@wildopensource.com)\n";
0061 
0062 
0063 static const struct net_device_ops rr_netdev_ops = {
0064     .ndo_open       = rr_open,
0065     .ndo_stop       = rr_close,
0066     .ndo_siocdevprivate = rr_siocdevprivate,
0067     .ndo_start_xmit     = rr_start_xmit,
0068     .ndo_set_mac_address    = hippi_mac_addr,
0069 };
0070 
0071 /*
0072  * Implementation notes:
0073  *
0074  * The DMA engine only allows for DMA within physical 64KB chunks of
0075  * memory. The current approach of the driver (and stack) is to use
0076  * linear blocks of memory for the skbuffs. However, as the data block
0077  * is always the first part of the skb and skbs are 2^n aligned, we
0078  * are guaranteed to get the whole block within one 64KB-aligned 64KB
0079  * chunk.
0080  *
0081  * In the long term, relying on being able to allocate 64KB linear
0082  * chunks of memory is not feasible and the skb handling code and the
0083  * stack will need to know about I/O vectors or something similar.
0084  */
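/*
 * For example: a data block that fits inside a naturally aligned 2^n
 * byte chunk, with 2^n <= 64KB, can never straddle a 64KB boundary,
 * since every such chunk lies entirely within a single 64KB-aligned
 * window the DMA engine can address.
 */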
0085 
0086 static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
0087 {
0088     struct net_device *dev;
0089     static int version_disp;
0090     u8 pci_latency;
0091     struct rr_private *rrpriv;
0092     void *tmpptr;
0093     dma_addr_t ring_dma;
0094     int ret = -ENOMEM;
0095 
0096     dev = alloc_hippi_dev(sizeof(struct rr_private));
0097     if (!dev)
0098         goto out3;
0099 
0100     ret = pci_enable_device(pdev);
0101     if (ret) {
0102         ret = -ENODEV;
0103         goto out2;
0104     }
0105 
0106     rrpriv = netdev_priv(dev);
0107 
0108     SET_NETDEV_DEV(dev, &pdev->dev);
0109 
0110     ret = pci_request_regions(pdev, "rrunner");
0111     if (ret < 0)
0112         goto out;
0113 
0114     pci_set_drvdata(pdev, dev);
0115 
0116     rrpriv->pci_dev = pdev;
0117 
0118     spin_lock_init(&rrpriv->lock);
0119 
0120     dev->netdev_ops = &rr_netdev_ops;
0121 
0122     /* display version info if adapter is found */
0123     if (!version_disp) {
0124         /* set display flag to TRUE so that */
0125         /* we only display this string ONCE */
0126         version_disp = 1;
0127         printk(version);
0128     }
0129 
0130     pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
0131     if (pci_latency <= 0x58){
0132         pci_latency = 0x58;
0133         pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
0134     }
0135 
0136     pci_set_master(pdev);
0137 
0138     printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
0139            "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
0140            (unsigned long long)pci_resource_start(pdev, 0),
0141            pdev->irq, pci_latency);
0142 
0143     /*
0144      * Remap the MMIO regs into kernel space.
0145      */
0146     rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
0147     if (!rrpriv->regs) {
0148         printk(KERN_ERR "%s:  Unable to map I/O register, "
0149             "RoadRunner will be disabled.\n", dev->name);
0150         ret = -EIO;
0151         goto out;
0152     }
0153 
0154     tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
0155                     GFP_KERNEL);
0156     rrpriv->tx_ring = tmpptr;
0157     rrpriv->tx_ring_dma = ring_dma;
0158 
0159     if (!tmpptr) {
0160         ret = -ENOMEM;
0161         goto out;
0162     }
0163 
0164     tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
0165                     GFP_KERNEL);
0166     rrpriv->rx_ring = tmpptr;
0167     rrpriv->rx_ring_dma = ring_dma;
0168 
0169     if (!tmpptr) {
0170         ret = -ENOMEM;
0171         goto out;
0172     }
0173 
0174     tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
0175                     GFP_KERNEL);
0176     rrpriv->evt_ring = tmpptr;
0177     rrpriv->evt_ring_dma = ring_dma;
0178 
0179     if (!tmpptr) {
0180         ret = -ENOMEM;
0181         goto out;
0182     }
0183 
0184     /*
0185      * Don't access any register before this point!
0186      */
0187 #ifdef __BIG_ENDIAN
0188     writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
0189         &rrpriv->regs->HostCtrl);
0190 #endif
0191     /*
0192      * Need to add a case for little-endian 64-bit hosts here.
0193      */
0194 
0195     rr_init(dev);
0196 
0197     ret = register_netdev(dev);
0198     if (ret)
0199         goto out;
0200     return 0;
0201 
0202  out:
0203     if (rrpriv->evt_ring)
0204         dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
0205                   rrpriv->evt_ring_dma);
0206     if (rrpriv->rx_ring)
0207         dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
0208                   rrpriv->rx_ring_dma);
0209     if (rrpriv->tx_ring)
0210         dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
0211                   rrpriv->tx_ring_dma);
0212     if (rrpriv->regs)
0213         pci_iounmap(pdev, rrpriv->regs);
0214     if (pdev)
0215         pci_release_regions(pdev);
0216     pci_disable_device(pdev);
0217  out2:
0218     free_netdev(dev);
0219  out3:
0220     return ret;
0221 }
0222 
0223 static void rr_remove_one(struct pci_dev *pdev)
0224 {
0225     struct net_device *dev = pci_get_drvdata(pdev);
0226     struct rr_private *rr = netdev_priv(dev);
0227 
0228     if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
0229         printk(KERN_ERR "%s: trying to unload running NIC\n",
0230                dev->name);
0231         writel(HALT_NIC, &rr->regs->HostCtrl);
0232     }
0233 
0234     unregister_netdev(dev);
0235     dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
0236               rr->evt_ring_dma);
0237     dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
0238               rr->rx_ring_dma);
0239     dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
0240               rr->tx_ring_dma);
0241     pci_iounmap(pdev, rr->regs);
0242     pci_release_regions(pdev);
0243     pci_disable_device(pdev);
0244     free_netdev(dev);
0245 }
0246 
0247 
0248 /*
0249  * Commands are considered to be slow, thus there is no reason to
0250  * inline this.
0251  */
0252 static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
0253 {
0254     struct rr_regs __iomem *regs;
0255     u32 idx;
0256 
0257     regs = rrpriv->regs;
0258     /*
0259      * This is temporary - it will go away in the final version.
0260      * We probably also want to make this function inline.
0261      */
0262     if (readl(&regs->HostCtrl) & NIC_HALTED){
0263         printk("issuing command for halted NIC, code 0x%x, "
0264                "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
0265         if (readl(&regs->Mode) & FATAL_ERR)
0266             printk("error codes Fail1 %02x, Fail2 %02x\n",
0267                    readl(&regs->Fail1), readl(&regs->Fail2));
0268     }
0269 
0270     idx = rrpriv->info->cmd_ctrl.pi;
0271 
0272     writel(*(u32*)(cmd), &regs->CmdRing[idx]);
0273     wmb();
0274 
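    /*
     * The command producer index moves downwards through the ring,
     * wrapping modulo CMD_RING_ENTRIES.
     */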
0275     idx = (idx - 1) % CMD_RING_ENTRIES;
0276     rrpriv->info->cmd_ctrl.pi = idx;
0277     wmb();
0278 
0279     if (readl(&regs->Mode) & FATAL_ERR)
0280         printk("error code %02x\n", readl(&regs->Fail1));
0281 }
0282 
0283 
0284 /*
0285  * Reset the board in a sensible manner. The NIC is already halted
0286  * when we get here and a spin-lock is held.
0287  */
0288 static int rr_reset(struct net_device *dev)
0289 {
0290     struct rr_private *rrpriv;
0291     struct rr_regs __iomem *regs;
0292     u32 start_pc;
0293     int i;
0294 
0295     rrpriv = netdev_priv(dev);
0296     regs = rrpriv->regs;
0297 
0298     rr_load_firmware(dev);
0299 
0300     writel(0x01000000, &regs->TX_state);
0301     writel(0xff800000, &regs->RX_state);
0302     writel(0, &regs->AssistState);
0303     writel(CLEAR_INTA, &regs->LocalCtrl);
0304     writel(0x01, &regs->BrkPt);
0305     writel(0, &regs->Timer);
0306     writel(0, &regs->TimerRef);
0307     writel(RESET_DMA, &regs->DmaReadState);
0308     writel(RESET_DMA, &regs->DmaWriteState);
0309     writel(0, &regs->DmaWriteHostHi);
0310     writel(0, &regs->DmaWriteHostLo);
0311     writel(0, &regs->DmaReadHostHi);
0312     writel(0, &regs->DmaReadHostLo);
0313     writel(0, &regs->DmaReadLen);
0314     writel(0, &regs->DmaWriteLen);
0315     writel(0, &regs->DmaWriteLcl);
0316     writel(0, &regs->DmaWriteIPchecksum);
0317     writel(0, &regs->DmaReadLcl);
0318     writel(0, &regs->DmaReadIPchecksum);
0319     writel(0, &regs->PciState);
0320 #if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
0321     writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
0322 #elif (BITS_PER_LONG == 64)
0323     writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
0324 #else
0325     writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
0326 #endif
0327 
0328 #if 0
0329     /*
0330      * Don't worry, this is just black magic.
0331      */
0332     writel(0xdf000, &regs->RxBase);
0333     writel(0xdf000, &regs->RxPrd);
0334     writel(0xdf000, &regs->RxCon);
0335     writel(0xce000, &regs->TxBase);
0336     writel(0xce000, &regs->TxPrd);
0337     writel(0xce000, &regs->TxCon);
0338     writel(0, &regs->RxIndPro);
0339     writel(0, &regs->RxIndCon);
0340     writel(0, &regs->RxIndRef);
0341     writel(0, &regs->TxIndPro);
0342     writel(0, &regs->TxIndCon);
0343     writel(0, &regs->TxIndRef);
0344     writel(0xcc000, &regs->pad10[0]);
0345     writel(0, &regs->DrCmndPro);
0346     writel(0, &regs->DrCmndCon);
0347     writel(0, &regs->DwCmndPro);
0348     writel(0, &regs->DwCmndCon);
0349     writel(0, &regs->DwCmndRef);
0350     writel(0, &regs->DrDataPro);
0351     writel(0, &regs->DrDataCon);
0352     writel(0, &regs->DrDataRef);
0353     writel(0, &regs->DwDataPro);
0354     writel(0, &regs->DwDataCon);
0355     writel(0, &regs->DwDataRef);
0356 #endif
0357 
0358     writel(0xffffffff, &regs->MbEvent);
0359     writel(0, &regs->Event);
0360 
0361     writel(0, &regs->TxPi);
0362     writel(0, &regs->IpRxPi);
0363 
0364     writel(0, &regs->EvtCon);
0365     writel(0, &regs->EvtPrd);
0366 
0367     rrpriv->info->evt_ctrl.pi = 0;
0368 
0369     for (i = 0; i < CMD_RING_ENTRIES; i++)
0370         writel(0, &regs->CmdRing[i]);
0371 
0372 /*
0373  * Why 32? Is this not cache line size dependent?
0374  */
0375     writel(RBURST_64|WBURST_64, &regs->PciState);
0376     wmb();
0377 
0378     start_pc = rr_read_eeprom_word(rrpriv,
0379             offsetof(struct eeprom, rncd_info.FwStart));
0380 
0381 #if (DEBUG > 1)
0382     printk("%s: Executing firmware at address 0x%06x\n",
0383            dev->name, start_pc);
0384 #endif
0385 
0386     writel(start_pc + 0x800, &regs->Pc);
0387     wmb();
0388     udelay(5);
0389 
0390     writel(start_pc, &regs->Pc);
0391     wmb();
0392 
0393     return 0;
0394 }
0395 
0396 
0397 /*
0398  * Read a string from the EEPROM.
0399  */
0400 static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
0401                 unsigned long offset,
0402                 unsigned char *buf,
0403                 unsigned long length)
0404 {
0405     struct rr_regs __iomem *regs = rrpriv->regs;
0406     u32 misc, io, host, i;
0407 
0408     io = readl(&regs->ExtIo);
0409     writel(0, &regs->ExtIo);
0410     misc = readl(&regs->LocalCtrl);
0411     writel(0, &regs->LocalCtrl);
0412     host = readl(&regs->HostCtrl);
0413     writel(host | HALT_NIC, &regs->HostCtrl);
0414     mb();
0415 
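    /* Each EEPROM byte sits 8 window addresses apart and is returned
     * in the top byte of the 32-bit window data register. */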
0416     for (i = 0; i < length; i++){
0417         writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
0418         mb();
0419         buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
0420         mb();
0421     }
0422 
0423     writel(host, &regs->HostCtrl);
0424     writel(misc, &regs->LocalCtrl);
0425     writel(io, &regs->ExtIo);
0426     mb();
0427     return i;
0428 }
0429 
0430 
0431 /*
0432  * Shortcut to read one word (4 bytes) out of the EEPROM and convert
0433  * it to our CPU byte-order.
0434  */
0435 static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
0436                 size_t offset)
0437 {
0438     __be32 word;
0439 
0440     if ((rr_read_eeprom(rrpriv, offset,
0441                 (unsigned char *)&word, 4) == 4))
0442         return be32_to_cpu(word);
0443     return 0;
0444 }
0445 
0446 
0447 /*
0448  * Write a string to the EEPROM.
0449  *
0450  * This is only called when the firmware is not running.
0451  */
0452 static unsigned int write_eeprom(struct rr_private *rrpriv,
0453                  unsigned long offset,
0454                  unsigned char *buf,
0455                  unsigned long length)
0456 {
0457     struct rr_regs __iomem *regs = rrpriv->regs;
0458     u32 misc, io, data, i, j, ready, error = 0;
0459 
0460     io = readl(&regs->ExtIo);
0461     writel(0, &regs->ExtIo);
0462     misc = readl(&regs->LocalCtrl);
0463     writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
0464     mb();
0465 
0466     for (i = 0; i < length; i++){
0467         writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
0468         mb();
0469         data = buf[i] << 24;
0470         /*
0471          * Only try to write the data if it is not the same
0472          * value already.
0473          */
0474         if ((readl(&regs->WinData) & 0xff000000) != data){
0475             writel(data, &regs->WinData);
0476             ready = 0;
0477             j = 0;
0478             mb();
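                /* Poll until the EEPROM cell reads back the new value,
                 * giving up after roughly 5000 * 20us = 100ms. */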
0479             while(!ready){
0480                 udelay(20);
0481                 if ((readl(&regs->WinData) & 0xff000000) ==
0482                     data)
0483                     ready = 1;
0484                 mb();
0485                 if (j++ > 5000){
0486                     printk("data mismatch: %08x, "
0487                            "WinData %08x\n", data,
0488                            readl(&regs->WinData));
0489                     ready = 1;
0490                     error = 1;
0491                 }
0492             }
0493         }
0494     }
0495 
0496     writel(misc, &regs->LocalCtrl);
0497     writel(io, &regs->ExtIo);
0498     mb();
0499 
0500     return error;
0501 }
0502 
0503 
0504 static int rr_init(struct net_device *dev)
0505 {
0506     u8 addr[HIPPI_ALEN] __aligned(4);
0507     struct rr_private *rrpriv;
0508     struct rr_regs __iomem *regs;
0509     u32 sram_size, rev;
0510 
0511     rrpriv = netdev_priv(dev);
0512     regs = rrpriv->regs;
0513 
0514     rev = readl(&regs->FwRev);
0515     rrpriv->fw_rev = rev;
0516     if (rev > 0x00020024)
0517         printk("  Firmware revision: %i.%i.%i\n", (rev >> 16),
0518                ((rev >> 8) & 0xff), (rev & 0xff));
0519     else if (rev >= 0x00020000) {
0520         printk("  Firmware revision: %i.%i.%i (2.0.37 or "
0521                "later is recommended)\n", (rev >> 16),
0522                ((rev >> 8) & 0xff), (rev & 0xff));
0523     }else{
0524         printk("  Firmware revision too old: %i.%i.%i, please "
0525                "upgrade to 2.0.37 or later.\n",
0526                (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
0527     }
0528 
0529 #if (DEBUG > 2)
0530     printk("  Maximum receive rings %i\n", readl(&regs->MaxRxRng));
0531 #endif
0532 
0533     /*
0534      * Read the hardware address from the eeprom.  The HW address
0535      * is not really necessary for HIPPI but awfully convenient.
0536      * The pointer arithmetic to put it in dev_addr is ugly, but
0537      * Donald Becker does it this way for the GigE version of this
0538      * card and it's shorter and more portable than any
0539      * other method I've seen.  -VAL
0540      */
0541 
0542     *(__be16 *)(addr) =
0543       htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
0544     *(__be32 *)(addr+2) =
0545       htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
0546     dev_addr_set(dev, addr);
0547 
0548     printk("  MAC: %pM\n", dev->dev_addr);
0549 
0550     sram_size = rr_read_eeprom_word(rrpriv, 8);
0551     printk("  SRAM size 0x%06x\n", sram_size);
0552 
0553     return 0;
0554 }
0555 
0556 
0557 static int rr_init1(struct net_device *dev)
0558 {
0559     struct rr_private *rrpriv;
0560     struct rr_regs __iomem *regs;
0561     unsigned long myjif, flags;
0562     struct cmd cmd;
0563     u32 hostctrl;
0564     int ecode = 0;
0565     short i;
0566 
0567     rrpriv = netdev_priv(dev);
0568     regs = rrpriv->regs;
0569 
0570     spin_lock_irqsave(&rrpriv->lock, flags);
0571 
0572     hostctrl = readl(&regs->HostCtrl);
0573     writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
0574     wmb();
0575 
0576     if (hostctrl & PARITY_ERR){
0577         printk("%s: Parity error halting NIC - this is serious!\n",
0578                dev->name);
0579         spin_unlock_irqrestore(&rrpriv->lock, flags);
0580         ecode = -EFAULT;
0581         goto error;
0582     }
0583 
0584     set_rxaddr(regs, rrpriv->rx_ctrl_dma);
0585     set_infoaddr(regs, rrpriv->info_dma);
0586 
0587     rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
0588     rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
0589     rrpriv->info->evt_ctrl.mode = 0;
0590     rrpriv->info->evt_ctrl.pi = 0;
0591     set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
0592 
0593     rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
0594     rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
0595     rrpriv->info->cmd_ctrl.mode = 0;
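    /* The command producer index counts downwards (see rr_issue_cmd()),
     * so start it at the top of the ring. */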
0596     rrpriv->info->cmd_ctrl.pi = 15;
0597 
0598     for (i = 0; i < CMD_RING_ENTRIES; i++) {
0599         writel(0, &regs->CmdRing[i]);
0600     }
0601 
0602     for (i = 0; i < TX_RING_ENTRIES; i++) {
0603         rrpriv->tx_ring[i].size = 0;
0604         set_rraddr(&rrpriv->tx_ring[i].addr, 0);
0605         rrpriv->tx_skbuff[i] = NULL;
0606     }
0607     rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
0608     rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
0609     rrpriv->info->tx_ctrl.mode = 0;
0610     rrpriv->info->tx_ctrl.pi = 0;
0611     set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
0612 
0613     /*
0614      * Set dirty_tx before we start receiving interrupts, otherwise
0615      * the interrupt handler might think it is supposed to process
0616      * tx ints before we are up and running, which may cause a null
0617      * pointer access in the int handler.
0618      */
0619     rrpriv->tx_full = 0;
0620     rrpriv->cur_rx = 0;
0621     rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
0622 
0623     rr_reset(dev);
0624 
0625     /* Tuning values */
0626     writel(0x5000, &regs->ConRetry);
0627     writel(0x100, &regs->ConRetryTmr);
0628     writel(0x500000, &regs->ConTmout);
0629     writel(0x60, &regs->IntrTmr);
0630     writel(0x500000, &regs->TxDataMvTimeout);
0631     writel(0x200000, &regs->RxDataMvTimeout);
0632     writel(0x80, &regs->WriteDmaThresh);
0633     writel(0x80, &regs->ReadDmaThresh);
0634 
0635     rrpriv->fw_running = 0;
0636     wmb();
0637 
0638     hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
0639     writel(hostctrl, &regs->HostCtrl);
0640     wmb();
0641 
0642     spin_unlock_irqrestore(&rrpriv->lock, flags);
0643 
0644     for (i = 0; i < RX_RING_ENTRIES; i++) {
0645         struct sk_buff *skb;
0646         dma_addr_t addr;
0647 
0648         rrpriv->rx_ring[i].mode = 0;
0649         skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
0650         if (!skb) {
0651             printk(KERN_WARNING "%s: Unable to allocate memory "
0652                    "for receive ring - halting NIC\n", dev->name);
0653             ecode = -ENOMEM;
0654             goto error;
0655         }
0656         rrpriv->rx_skbuff[i] = skb;
0657         addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
0658                       dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
0659         /*
0660          * Sanity test to see if we conflict with the DMA
0661          * limitations of the Roadrunner.
0662          */
0663         if ((((unsigned long)skb->data) & 0xfff) > ~65320)
0664             printk("skb alloc error\n");
0665 
0666         set_rraddr(&rrpriv->rx_ring[i].addr, addr);
0667         rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
0668     }
0669 
0670     rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
0671     rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
0672     rrpriv->rx_ctrl[4].mode = 8;
0673     rrpriv->rx_ctrl[4].pi = 0;
0674     wmb();
0675     set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
0676 
0677     udelay(1000);
0678 
0679     /*
0680      * Now start the FirmWare.
0681      */
0682     cmd.code = C_START_FW;
0683     cmd.ring = 0;
0684     cmd.index = 0;
0685 
0686     rr_issue_cmd(rrpriv, &cmd);
0687 
0688     /*
0689      * Give the FirmWare time to chew on the `get running' command.
0690      */
0691     myjif = jiffies + 5 * HZ;
0692     while (time_before(jiffies, myjif) && !rrpriv->fw_running)
0693         cpu_relax();
0694 
0695     netif_start_queue(dev);
0696 
0697     return ecode;
0698 
0699  error:
0700     /*
0701      * We might have gotten here because we are out of memory,
0702      * so make sure we release everything we allocated before failing.
0703      */
0704     for (i = 0; i < RX_RING_ENTRIES; i++) {
0705         struct sk_buff *skb = rrpriv->rx_skbuff[i];
0706 
0707         if (skb) {
0708             dma_unmap_single(&rrpriv->pci_dev->dev,
0709                      rrpriv->rx_ring[i].addr.addrlo,
0710                      dev->mtu + HIPPI_HLEN,
0711                      DMA_FROM_DEVICE);
0712             rrpriv->rx_ring[i].size = 0;
0713             set_rraddr(&rrpriv->rx_ring[i].addr, 0);
0714             dev_kfree_skb(skb);
0715             rrpriv->rx_skbuff[i] = NULL;
0716         }
0717     }
0718     return ecode;
0719 }
0720 
0721 
0722 /*
0723  * All events are considered to be slow (RX/TX ints do not generate
0724  * events) and are handled here, outside the main interrupt handler,
0725  * to reduce the size of the handler.
0726  */
0727 static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
0728 {
0729     struct rr_private *rrpriv;
0730     struct rr_regs __iomem *regs;
0731     u32 tmp;
0732 
0733     rrpriv = netdev_priv(dev);
0734     regs = rrpriv->regs;
0735 
0736     while (prodidx != eidx){
0737         switch (rrpriv->evt_ring[eidx].code){
0738         case E_NIC_UP:
0739             tmp = readl(&regs->FwRev);
0740             printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
0741                    "up and running\n", dev->name,
0742                    (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
0743             rrpriv->fw_running = 1;
0744             writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
0745             wmb();
0746             break;
0747         case E_LINK_ON:
0748             printk(KERN_INFO "%s: Optical link ON\n", dev->name);
0749             break;
0750         case E_LINK_OFF:
0751             printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
0752             break;
0753         case E_RX_IDLE:
0754             printk(KERN_WARNING "%s: RX data not moving\n",
0755                    dev->name);
0756             goto drop;
0757         case E_WATCHDOG:
0758             printk(KERN_INFO "%s: The watchdog is here to see "
0759                    "us\n", dev->name);
0760             break;
0761         case E_INTERN_ERR:
0762             printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
0763                    dev->name);
0764             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0765                    &regs->HostCtrl);
0766             wmb();
0767             break;
0768         case E_HOST_ERR:
0769             printk(KERN_ERR "%s: Host software error\n",
0770                    dev->name);
0771             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0772                    &regs->HostCtrl);
0773             wmb();
0774             break;
0775         /*
0776          * TX events.
0777          */
0778         case E_CON_REJ:
0779             printk(KERN_WARNING "%s: Connection rejected\n",
0780                    dev->name);
0781             dev->stats.tx_aborted_errors++;
0782             break;
0783         case E_CON_TMOUT:
0784             printk(KERN_WARNING "%s: Connection timeout\n",
0785                    dev->name);
0786             break;
0787         case E_DISC_ERR:
0788             printk(KERN_WARNING "%s: HIPPI disconnect error\n",
0789                    dev->name);
0790             dev->stats.tx_aborted_errors++;
0791             break;
0792         case E_INT_PRTY:
0793             printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
0794                    dev->name);
0795             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0796                    &regs->HostCtrl);
0797             wmb();
0798             break;
0799         case E_TX_IDLE:
0800             printk(KERN_WARNING "%s: Transmitter idle\n",
0801                    dev->name);
0802             break;
0803         case E_TX_LINK_DROP:
0804             printk(KERN_WARNING "%s: Link lost during transmit\n",
0805                    dev->name);
0806             dev->stats.tx_aborted_errors++;
0807             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0808                    &regs->HostCtrl);
0809             wmb();
0810             break;
0811         case E_TX_INV_RNG:
0812             printk(KERN_ERR "%s: Invalid send ring block\n",
0813                    dev->name);
0814             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0815                    &regs->HostCtrl);
0816             wmb();
0817             break;
0818         case E_TX_INV_BUF:
0819             printk(KERN_ERR "%s: Invalid send buffer address\n",
0820                    dev->name);
0821             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0822                    &regs->HostCtrl);
0823             wmb();
0824             break;
0825         case E_TX_INV_DSC:
0826             printk(KERN_ERR "%s: Invalid descriptor address\n",
0827                    dev->name);
0828             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0829                    &regs->HostCtrl);
0830             wmb();
0831             break;
0832         /*
0833          * RX events.
0834          */
0835         case E_RX_RNG_OUT:
0836             printk(KERN_INFO "%s: Receive ring full\n", dev->name);
0837             break;
0838 
0839         case E_RX_PAR_ERR:
0840             printk(KERN_WARNING "%s: Receive parity error\n",
0841                    dev->name);
0842             goto drop;
0843         case E_RX_LLRC_ERR:
0844             printk(KERN_WARNING "%s: Receive LLRC error\n",
0845                    dev->name);
0846             goto drop;
0847         case E_PKT_LN_ERR:
0848             printk(KERN_WARNING "%s: Receive packet length "
0849                    "error\n", dev->name);
0850             goto drop;
0851         case E_DTA_CKSM_ERR:
0852             printk(KERN_WARNING "%s: Data checksum error\n",
0853                    dev->name);
0854             goto drop;
0855         case E_SHT_BST:
0856             printk(KERN_WARNING "%s: Unexpected short burst "
0857                    "error\n", dev->name);
0858             goto drop;
0859         case E_STATE_ERR:
0860             printk(KERN_WARNING "%s: Recv. state transition"
0861                    " error\n", dev->name);
0862             goto drop;
0863         case E_UNEXP_DATA:
0864             printk(KERN_WARNING "%s: Unexpected data error\n",
0865                    dev->name);
0866             goto drop;
0867         case E_LST_LNK_ERR:
0868             printk(KERN_WARNING "%s: Link lost error\n",
0869                    dev->name);
0870             goto drop;
0871         case E_FRM_ERR:
0872             printk(KERN_WARNING "%s: Framing Error\n",
0873                    dev->name);
0874             goto drop;
0875         case E_FLG_SYN_ERR:
0876             printk(KERN_WARNING "%s: Flag sync. lost during "
0877                    "packet\n", dev->name);
0878             goto drop;
0879         case E_RX_INV_BUF:
0880             printk(KERN_ERR "%s: Invalid receive buffer "
0881                    "address\n", dev->name);
0882             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0883                    &regs->HostCtrl);
0884             wmb();
0885             break;
0886         case E_RX_INV_DSC:
0887             printk(KERN_ERR "%s: Invalid receive descriptor "
0888                    "address\n", dev->name);
0889             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0890                    &regs->HostCtrl);
0891             wmb();
0892             break;
0893         case E_RNG_BLK:
0894             printk(KERN_ERR "%s: Invalid ring block\n",
0895                    dev->name);
0896             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
0897                    &regs->HostCtrl);
0898             wmb();
0899             break;
0900         drop:
0901             /* Label packet to be dropped.
0902              * Actual dropping occurs in rx
0903              * handling.
0904              *
0905              * The index of the packet we get to drop is
0906              * the index of the packet following
0907              * the bad packet. -kbf
0908              */
0909             {
0910                 u16 index = rrpriv->evt_ring[eidx].index;
0911                 index = (index + (RX_RING_ENTRIES - 1)) %
0912                     RX_RING_ENTRIES;
0913                 rrpriv->rx_ring[index].mode |=
0914                     (PACKET_BAD | PACKET_END);
0915             }
0916             break;
0917         default:
0918             printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
0919                    dev->name, rrpriv->evt_ring[eidx].code);
0920         }
0921         eidx = (eidx + 1) % EVT_RING_ENTRIES;
0922     }
0923 
0924     rrpriv->info->evt_ctrl.pi = eidx;
0925     wmb();
0926     return eidx;
0927 }
0928 
0929 
0930 static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
0931 {
0932     struct rr_private *rrpriv = netdev_priv(dev);
0933     struct rr_regs __iomem *regs = rrpriv->regs;
0934 
0935     do {
0936         struct rx_desc *desc;
0937         u32 pkt_len;
0938 
0939         desc = &(rrpriv->rx_ring[index]);
0940         pkt_len = desc->size;
0941 #if (DEBUG > 2)
0942         printk("index %i, rxlimit %i\n", index, rxlimit);
0943         printk("len %x, mode %x\n", pkt_len, desc->mode);
0944 #endif
0945         if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
0946             dev->stats.rx_dropped++;
0947             goto defer;
0948         }
0949 
0950         if (pkt_len > 0){
0951             struct sk_buff *skb, *rx_skb;
0952 
0953             rx_skb = rrpriv->rx_skbuff[index];
0954 
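            /* Small frames are copied into a fresh skb so the mapped
             * ring buffer can stay in place; larger frames are handed
             * up directly and the ring slot gets a newly mapped buffer. */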
0955             if (pkt_len < PKT_COPY_THRESHOLD) {
0956                 skb = alloc_skb(pkt_len, GFP_ATOMIC);
0957                 if (skb == NULL){
0958                     printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
0959                     dev->stats.rx_dropped++;
0960                     goto defer;
0961                 } else {
0962                     dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
0963                                 desc->addr.addrlo,
0964                                 pkt_len,
0965                                 DMA_FROM_DEVICE);
0966 
0967                     skb_put_data(skb, rx_skb->data,
0968                              pkt_len);
0969 
0970                     dma_sync_single_for_device(&rrpriv->pci_dev->dev,
0971                                    desc->addr.addrlo,
0972                                    pkt_len,
0973                                    DMA_FROM_DEVICE);
0974                 }
0975             }else{
0976                 struct sk_buff *newskb;
0977 
0978                 newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
0979                     GFP_ATOMIC);
0980                 if (newskb){
0981                     dma_addr_t addr;
0982 
0983                     dma_unmap_single(&rrpriv->pci_dev->dev,
0984                              desc->addr.addrlo,
0985                              dev->mtu + HIPPI_HLEN,
0986                              DMA_FROM_DEVICE);
0987                     skb = rx_skb;
0988                     skb_put(skb, pkt_len);
0989                     rrpriv->rx_skbuff[index] = newskb;
0990                     addr = dma_map_single(&rrpriv->pci_dev->dev,
0991                                   newskb->data,
0992                                   dev->mtu + HIPPI_HLEN,
0993                                   DMA_FROM_DEVICE);
0994                     set_rraddr(&desc->addr, addr);
0995                 } else {
0996                     printk("%s: Out of memory, deferring "
0997                            "packet\n", dev->name);
0998                     dev->stats.rx_dropped++;
0999                     goto defer;
1000                 }
1001             }
1002             skb->protocol = hippi_type_trans(skb, dev);
1003 
1004             netif_rx(skb);      /* send it up */
1005 
1006             dev->stats.rx_packets++;
1007             dev->stats.rx_bytes += pkt_len;
1008         }
1009     defer:
1010         desc->mode = 0;
1011         desc->size = dev->mtu + HIPPI_HLEN;
1012 
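        /* Only advance the NIC's RX producer index every eighth
         * descriptor to limit the number of MMIO writes. */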
1013         if ((index & 7) == 7)
1014             writel(index, &regs->IpRxPi);
1015 
1016         index = (index + 1) % RX_RING_ENTRIES;
1017     } while(index != rxlimit);
1018 
1019     rrpriv->cur_rx = index;
1020     wmb();
1021 }
1022 
1023 
1024 static irqreturn_t rr_interrupt(int irq, void *dev_id)
1025 {
1026     struct rr_private *rrpriv;
1027     struct rr_regs __iomem *regs;
1028     struct net_device *dev = (struct net_device *)dev_id;
1029     u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
1030 
1031     rrpriv = netdev_priv(dev);
1032     regs = rrpriv->regs;
1033 
1034     if (!(readl(&regs->HostCtrl) & RR_INT))
1035         return IRQ_NONE;
1036 
1037     spin_lock(&rrpriv->lock);
1038 
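    /*
     * EvtPrd packs three ring indices into one register: the event
     * producer index in the low byte, the TX consumer index in bits
     * 8-15 and the RX limit index in bits 16-23, unpacked below.
     */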
1039     prodidx = readl(&regs->EvtPrd);
1040     txcsmr = (prodidx >> 8) & 0xff;
1041     rxlimit = (prodidx >> 16) & 0xff;
1042     prodidx &= 0xff;
1043 
1044 #if (DEBUG > 2)
1045     printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
1046            prodidx, rrpriv->info->evt_ctrl.pi);
1047 #endif
1048     /*
1049      * Order here is important.  We must handle events
1050      * before doing anything else in order to catch
1051      * such things as LLRC errors, etc -kbf
1052      */
1053 
1054     eidx = rrpriv->info->evt_ctrl.pi;
1055     if (prodidx != eidx)
1056         eidx = rr_handle_event(dev, prodidx, eidx);
1057 
1058     rxindex = rrpriv->cur_rx;
1059     if (rxindex != rxlimit)
1060         rx_int(dev, rxlimit, rxindex);
1061 
1062     txcon = rrpriv->dirty_tx;
1063     if (txcsmr != txcon) {
1064         do {
1065             /* Due to occasional firmware TX producer/consumer
1066              * out-of-sync errors, we need to check the entry in the ring. -kbf
1067              */
1068             if(rrpriv->tx_skbuff[txcon]){
1069                 struct tx_desc *desc;
1070                 struct sk_buff *skb;
1071 
1072                 desc = &(rrpriv->tx_ring[txcon]);
1073                 skb = rrpriv->tx_skbuff[txcon];
1074 
1075                 dev->stats.tx_packets++;
1076                 dev->stats.tx_bytes += skb->len;
1077 
1078                 dma_unmap_single(&rrpriv->pci_dev->dev,
1079                          desc->addr.addrlo, skb->len,
1080                          DMA_TO_DEVICE);
1081                 dev_kfree_skb_irq(skb);
1082 
1083                 rrpriv->tx_skbuff[txcon] = NULL;
1084                 desc->size = 0;
1085                 set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
1086                 desc->mode = 0;
1087             }
1088             txcon = (txcon + 1) % TX_RING_ENTRIES;
1089         } while (txcsmr != txcon);
1090         wmb();
1091 
1092         rrpriv->dirty_tx = txcon;
1093         if (rrpriv->tx_full && rr_if_busy(dev) &&
1094             (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
1095              != rrpriv->dirty_tx)){
1096             rrpriv->tx_full = 0;
1097             netif_wake_queue(dev);
1098         }
1099     }
1100 
1101     eidx |= ((txcsmr << 8) | (rxlimit << 16));
1102     writel(eidx, &regs->EvtCon);
1103     wmb();
1104 
1105     spin_unlock(&rrpriv->lock);
1106     return IRQ_HANDLED;
1107 }
1108 
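/*
 * Unmap and free any skbs still left in the TX ring; used when the
 * NIC is shut down or restarted.
 */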
1109 static inline void rr_raz_tx(struct rr_private *rrpriv,
1110                  struct net_device *dev)
1111 {
1112     int i;
1113 
1114     for (i = 0; i < TX_RING_ENTRIES; i++) {
1115         struct sk_buff *skb = rrpriv->tx_skbuff[i];
1116 
1117         if (skb) {
1118             struct tx_desc *desc = &(rrpriv->tx_ring[i]);
1119 
1120             dma_unmap_single(&rrpriv->pci_dev->dev,
1121                      desc->addr.addrlo, skb->len,
1122                      DMA_TO_DEVICE);
1123             desc->size = 0;
1124             set_rraddr(&desc->addr, 0);
1125             dev_kfree_skb(skb);
1126             rrpriv->tx_skbuff[i] = NULL;
1127         }
1128     }
1129 }
1130 
1131 
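/*
 * Likewise, unmap and free any receive buffers still in the RX ring.
 */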
1132 static inline void rr_raz_rx(struct rr_private *rrpriv,
1133                  struct net_device *dev)
1134 {
1135     int i;
1136 
1137     for (i = 0; i < RX_RING_ENTRIES; i++) {
1138         struct sk_buff *skb = rrpriv->rx_skbuff[i];
1139 
1140         if (skb) {
1141             struct rx_desc *desc = &(rrpriv->rx_ring[i]);
1142 
1143             dma_unmap_single(&rrpriv->pci_dev->dev,
1144                      desc->addr.addrlo,
1145                      dev->mtu + HIPPI_HLEN,
1146                      DMA_FROM_DEVICE);
1147             desc->size = 0;
1148             set_rraddr(&desc->addr, 0);
1149             dev_kfree_skb(skb);
1150             rrpriv->rx_skbuff[i] = NULL;
1151         }
1152     }
1153 }
1154 
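/*
 * Watchdog timer: if the firmware has halted the NIC, tear down the
 * rings and reinitialise the board. Re-armed every 5 seconds (see
 * rr_open()).
 */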
1155 static void rr_timer(struct timer_list *t)
1156 {
1157     struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
1158     struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
1159     struct rr_regs __iomem *regs = rrpriv->regs;
1160     unsigned long flags;
1161 
1162     if (readl(&regs->HostCtrl) & NIC_HALTED){
1163         printk("%s: Restarting nic\n", dev->name);
1164         memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
1165         memset(rrpriv->info, 0, sizeof(struct rr_info));
1166         wmb();
1167 
1168         rr_raz_tx(rrpriv, dev);
1169         rr_raz_rx(rrpriv, dev);
1170 
1171         if (rr_init1(dev)) {
1172             spin_lock_irqsave(&rrpriv->lock, flags);
1173             writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
1174                    &regs->HostCtrl);
1175             spin_unlock_irqrestore(&rrpriv->lock, flags);
1176         }
1177     }
1178     rrpriv->timer.expires = RUN_AT(5*HZ);
1179     add_timer(&rrpriv->timer);
1180 }
1181 
1182 
1183 static int rr_open(struct net_device *dev)
1184 {
1185     struct rr_private *rrpriv = netdev_priv(dev);
1186     struct pci_dev *pdev = rrpriv->pci_dev;
1187     struct rr_regs __iomem *regs;
1188     int ecode = 0;
1189     unsigned long flags;
1190     dma_addr_t dma_addr;
1191 
1192     regs = rrpriv->regs;
1193 
1194     if (rrpriv->fw_rev < 0x00020000) {
1195         printk(KERN_WARNING "%s: trying to configure device with "
1196                "obsolete firmware\n", dev->name);
1197         ecode = -EBUSY;
1198         goto error;
1199     }
1200 
1201     rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
1202                          256 * sizeof(struct ring_ctrl),
1203                          &dma_addr, GFP_KERNEL);
1204     if (!rrpriv->rx_ctrl) {
1205         ecode = -ENOMEM;
1206         goto error;
1207     }
1208     rrpriv->rx_ctrl_dma = dma_addr;
1209 
1210     rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
1211                       &dma_addr, GFP_KERNEL);
1212     if (!rrpriv->info) {
1213         ecode = -ENOMEM;
1214         goto error;
1215     }
1216     rrpriv->info_dma = dma_addr;
1217     wmb();
1218 
1219     spin_lock_irqsave(&rrpriv->lock, flags);
1220     writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1221     readl(&regs->HostCtrl);
1222     spin_unlock_irqrestore(&rrpriv->lock, flags);
1223 
1224     if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
1225         printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1226                dev->name, pdev->irq);
1227         ecode = -EAGAIN;
1228         goto error;
1229     }
1230 
1231     if ((ecode = rr_init1(dev)))
1232         goto error;
1233 
1234     /* Set the timer to check for link beat and perhaps switch
1235        to an alternate media type. */
1236     timer_setup(&rrpriv->timer, rr_timer, 0);
1237     rrpriv->timer.expires = RUN_AT(5*HZ);           /* 5 sec. watchdog */
1238     add_timer(&rrpriv->timer);
1239 
1240     netif_start_queue(dev);
1241 
1242     return ecode;
1243 
1244  error:
1245     spin_lock_irqsave(&rrpriv->lock, flags);
1246     writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1247     spin_unlock_irqrestore(&rrpriv->lock, flags);
1248 
1249     if (rrpriv->info) {
1250         dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
1251                   rrpriv->info, rrpriv->info_dma);
1252         rrpriv->info = NULL;
1253     }
1254     if (rrpriv->rx_ctrl) {
1255         dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
1256                   rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1257         rrpriv->rx_ctrl = NULL;
1258     }
1259 
1260     netif_stop_queue(dev);
1261 
1262     return ecode;
1263 }
1264 
1265 
1266 static void rr_dump(struct net_device *dev)
1267 {
1268     struct rr_private *rrpriv;
1269     struct rr_regs __iomem *regs;
1270     u32 index, cons;
1271     short i;
1272     int len;
1273 
1274     rrpriv = netdev_priv(dev);
1275     regs = rrpriv->regs;
1276 
1277     printk("%s: dumping NIC TX rings\n", dev->name);
1278 
1279     printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
1280            readl(&regs->RxPrd), readl(&regs->TxPrd),
1281            readl(&regs->EvtPrd), readl(&regs->TxPi),
1282            rrpriv->info->tx_ctrl.pi);
1283 
1284     printk("Error code 0x%x\n", readl(&regs->Fail1));
1285 
1286     index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
1287     cons = rrpriv->dirty_tx;
1288     printk("TX ring index %i, TX consumer %i\n",
1289            index, cons);
1290 
1291     if (rrpriv->tx_skbuff[index]){
1292         len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
1293         printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
1294         for (i = 0; i < len; i++){
1295             if (!(i & 7))
1296                 printk("\n");
1297             printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
1298         }
1299         printk("\n");
1300     }
1301 
1302     if (rrpriv->tx_skbuff[cons]){
1303         len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
1304         printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
1305         printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
1306                rrpriv->tx_ring[cons].mode,
1307                rrpriv->tx_ring[cons].size,
1308                (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
1309                rrpriv->tx_skbuff[cons]->data,
1310                (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
1311         for (i = 0; i < len; i++){
1312             if (!(i & 7))
1313                 printk("\n");
1314             printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
1315         }
1316         printk("\n");
1317     }
1318 
1319     printk("dumping TX ring info:\n");
1320     for (i = 0; i < TX_RING_ENTRIES; i++)
1321         printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
1322                rrpriv->tx_ring[i].mode,
1323                rrpriv->tx_ring[i].size,
1324                (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
1325 
1326 }
1327 
1328 
1329 static int rr_close(struct net_device *dev)
1330 {
1331     struct rr_private *rrpriv = netdev_priv(dev);
1332     struct rr_regs __iomem *regs = rrpriv->regs;
1333     struct pci_dev *pdev = rrpriv->pci_dev;
1334     unsigned long flags;
1335     u32 tmp;
1336     short i;
1337 
1338     netif_stop_queue(dev);
1339 
1340 
1341     /*
1342      * Lock to make sure we are not cleaning up while another CPU
1343      * is handling interrupts.
1344      */
1345     spin_lock_irqsave(&rrpriv->lock, flags);
1346 
1347     tmp = readl(&regs->HostCtrl);
1348     if (tmp & NIC_HALTED){
1349         printk("%s: NIC already halted\n", dev->name);
1350         rr_dump(dev);
1351     }else{
1352         tmp |= HALT_NIC | RR_CLEAR_INT;
1353         writel(tmp, &regs->HostCtrl);
1354         readl(&regs->HostCtrl);
1355     }
1356 
1357     rrpriv->fw_running = 0;
1358 
1359     spin_unlock_irqrestore(&rrpriv->lock, flags);
1360     del_timer_sync(&rrpriv->timer);
1361     spin_lock_irqsave(&rrpriv->lock, flags);
1362 
1363     writel(0, &regs->TxPi);
1364     writel(0, &regs->IpRxPi);
1365 
1366     writel(0, &regs->EvtCon);
1367     writel(0, &regs->EvtPrd);
1368 
1369     for (i = 0; i < CMD_RING_ENTRIES; i++)
1370         writel(0, &regs->CmdRing[i]);
1371 
1372     rrpriv->info->tx_ctrl.entries = 0;
1373     rrpriv->info->cmd_ctrl.pi = 0;
1374     rrpriv->info->evt_ctrl.pi = 0;
1375     rrpriv->rx_ctrl[4].entries = 0;
1376 
1377     rr_raz_tx(rrpriv, dev);
1378     rr_raz_rx(rrpriv, dev);
1379 
1380     dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
1381               rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1382     rrpriv->rx_ctrl = NULL;
1383 
1384     dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
1385               rrpriv->info_dma);
1386     rrpriv->info = NULL;
1387 
1388     spin_unlock_irqrestore(&rrpriv->lock, flags);
1389     free_irq(pdev->irq, dev);
1390 
1391     return 0;
1392 }
1393 
1394 
1395 static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
1396                  struct net_device *dev)
1397 {
1398     struct rr_private *rrpriv = netdev_priv(dev);
1399     struct rr_regs __iomem *regs = rrpriv->regs;
1400     struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
1401     struct ring_ctrl *txctrl;
1402     unsigned long flags;
1403     u32 index, len = skb->len;
1404     u32 *ifield;
1405     struct sk_buff *new_skb;
1406 
1407     if (readl(&regs->Mode) & FATAL_ERR)
1408         printk("error codes Fail1 %02x, Fail2 %02x\n",
1409                readl(&regs->Fail1), readl(&regs->Fail2));
1410 
1411     /*
1412      * We probably need to deal with tbusy here to prevent overruns.
1413      */
1414 
1415     if (skb_headroom(skb) < 8){
1416         printk("incoming skb too small - reallocating\n");
1417         if (!(new_skb = dev_alloc_skb(len + 8))) {
1418             dev_kfree_skb(skb);
1419             netif_wake_queue(dev);
1420             return NETDEV_TX_OK;
1421         }
1422         skb_reserve(new_skb, 8);
1423         skb_put(new_skb, len);
1424         skb_copy_from_linear_data(skb, new_skb->data, len);
1425         dev_kfree_skb(skb);
1426         skb = new_skb;
1427     }
1428 
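    /* Prepend the two 32-bit HIPPI ifield words in front of the frame
     * data; this is what the 8 bytes of headroom above are for. */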
1429     ifield = skb_push(skb, 8);
1430 
1431     ifield[0] = 0;
1432     ifield[1] = hcb->ifield;
1433 
1434     /*
1435      * We don't need the lock until we actually start
1436      * fiddling with the control blocks.
1437      */
1438     spin_lock_irqsave(&rrpriv->lock, flags);
1439 
1440     txctrl = &rrpriv->info->tx_ctrl;
1441 
1442     index = txctrl->pi;
1443 
1444     rrpriv->tx_skbuff[index] = skb;
1445     set_rraddr(&rrpriv->tx_ring[index].addr,
1446            dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
1447     rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
1448     rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
1449     txctrl->pi = (index + 1) % TX_RING_ENTRIES;
1450     wmb();
1451     writel(txctrl->pi, &regs->TxPi);
1452 
1453     if (txctrl->pi == rrpriv->dirty_tx){
1454         rrpriv->tx_full = 1;
1455         netif_stop_queue(dev);
1456     }
1457 
1458     spin_unlock_irqrestore(&rrpriv->lock, flags);
1459 
1460     return NETDEV_TX_OK;
1461 }
1462 
1463 
1464 /*
1465  * Read the firmware out of the EEPROM and put it into the SRAM
1466  * (or from user space - later)
1467  *
1468  * This operation requires the NIC to be halted and is performed with
1469  * interrupts disabled and with the spinlock held.
1470  */
1471 static int rr_load_firmware(struct net_device *dev)
1472 {
1473     struct rr_private *rrpriv;
1474     struct rr_regs __iomem *regs;
1475     size_t eptr, segptr;
1476     int i, j;
1477     u32 localctrl, sptr, len, tmp;
1478     u32 p2len, p2size, nr_seg, revision, io, sram_size;
1479 
1480     rrpriv = netdev_priv(dev);
1481     regs = rrpriv->regs;
1482 
1483     if (dev->flags & IFF_UP)
1484         return -EBUSY;
1485 
1486     if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
1487         printk("%s: Trying to load firmware to a running NIC.\n",
1488                dev->name);
1489         return -EBUSY;
1490     }
1491 
1492     localctrl = readl(&regs->LocalCtrl);
1493     writel(0, &regs->LocalCtrl);
1494 
1495     writel(0, &regs->EvtPrd);
1496     writel(0, &regs->RxPrd);
1497     writel(0, &regs->TxPrd);
1498 
1499     /*
1500      * First wipe the entire SRAM, otherwise we might run into all
1501      * kinds of trouble ... sigh, this took almost all afternoon
1502      * to track down ;-(
1503      */
1504     io = readl(&regs->ExtIo);
1505     writel(0, &regs->ExtIo);
1506     sram_size = rr_read_eeprom_word(rrpriv, 8);
1507 
1508     for (i = 200; i < sram_size / 4; i++){
1509         writel(i * 4, &regs->WinBase);
1510         mb();
1511         writel(0, &regs->WinData);
1512         mb();
1513     }
1514     writel(io, &regs->ExtIo);
1515     mb();
1516 
1517     eptr = rr_read_eeprom_word(rrpriv,
1518                offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
1519     eptr = ((eptr & 0x1fffff) >> 3);
1520 
1521     p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
1522     p2len = (p2len << 2);
1523     p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
1524     p2size = ((p2size & 0x1fffff) >> 3);
1525 
1526     if ((eptr < p2size) || (eptr > (p2size + p2len))){
1527         printk("%s: eptr is invalid\n", dev->name);
1528         goto out;
1529     }
1530 
1531     revision = rr_read_eeprom_word(rrpriv,
1532             offsetof(struct eeprom, manf.HeaderFmt));
1533 
1534     if (revision != 1){
1535         printk("%s: invalid firmware format (%i)\n",
1536                dev->name, revision);
1537         goto out;
1538     }
1539 
1540     nr_seg = rr_read_eeprom_word(rrpriv, eptr);
1541     eptr += 4;
1542 #if (DEBUG > 1)
1543     printk("%s: nr_seg %i\n", dev->name, nr_seg);
1544 #endif
1545 
1546     for (i = 0; i < nr_seg; i++){
1547         sptr = rr_read_eeprom_word(rrpriv, eptr);
1548         eptr += 4;
1549         len = rr_read_eeprom_word(rrpriv, eptr);
1550         eptr += 4;
1551         segptr = rr_read_eeprom_word(rrpriv, eptr);
1552         segptr = ((segptr & 0x1fffff) >> 3);
1553         eptr += 4;
1554 #if (DEBUG > 1)
1555         printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
1556                dev->name, i, sptr, len, segptr);
1557 #endif
1558         for (j = 0; j < len; j++){
1559             tmp = rr_read_eeprom_word(rrpriv, segptr);
1560             writel(sptr, &regs->WinBase);
1561             mb();
1562             writel(tmp, &regs->WinData);
1563             mb();
1564             segptr += 4;
1565             sptr += 4;
1566         }
1567     }
1568 
1569 out:
1570     writel(localctrl, &regs->LocalCtrl);
1571     mb();
1572     return 0;
1573 }
1574 
1575 
1576 static int rr_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1577                  void __user *data, int cmd)
1578 {
1579     struct rr_private *rrpriv;
1580     unsigned char *image, *oldimage;
1581     unsigned long flags;
1582     unsigned int i;
1583     int error = -EOPNOTSUPP;
1584 
1585     rrpriv = netdev_priv(dev);
1586 
1587     switch(cmd){
1588     case SIOCRRGFW:
1589         if (!capable(CAP_SYS_RAWIO)){
1590             return -EPERM;
1591         }
1592 
1593         image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
1594         if (!image)
1595             return -ENOMEM;
1596 
1597         if (rrpriv->fw_running){
1598             printk("%s: Firmware already running\n", dev->name);
1599             error = -EPERM;
1600             goto gf_out;
1601         }
1602 
1603         spin_lock_irqsave(&rrpriv->lock, flags);
1604         i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1605         spin_unlock_irqrestore(&rrpriv->lock, flags);
1606         if (i != EEPROM_BYTES){
1607             printk(KERN_ERR "%s: Error reading EEPROM\n",
1608                    dev->name);
1609             error = -EFAULT;
1610             goto gf_out;
1611         }
1612         error = copy_to_user(data, image, EEPROM_BYTES);
1613         if (error)
1614             error = -EFAULT;
1615     gf_out:
1616         kfree(image);
1617         return error;
1618 
1619     case SIOCRRPFW:
1620         if (!capable(CAP_SYS_RAWIO)){
1621             return -EPERM;
1622         }
1623 
1624         image = memdup_user(data, EEPROM_BYTES);
1625         if (IS_ERR(image))
1626             return PTR_ERR(image);
1627 
1628         oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
1629         if (!oldimage) {
1630             kfree(image);
1631             return -ENOMEM;
1632         }
1633 
1634         if (rrpriv->fw_running){
1635             printk("%s: Firmware already running\n", dev->name);
1636             error = -EPERM;
1637             goto wf_out;
1638         }
1639 
1640         printk("%s: Updating EEPROM firmware\n", dev->name);
1641 
1642         spin_lock_irqsave(&rrpriv->lock, flags);
1643         error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1644         if (error)
1645             printk(KERN_ERR "%s: Error writing EEPROM\n",
1646                    dev->name);
1647 
1648         i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
1649         spin_unlock_irqrestore(&rrpriv->lock, flags);
1650 
1651         if (i != EEPROM_BYTES)
1652             printk(KERN_ERR "%s: Error reading back EEPROM "
1653                    "image\n", dev->name);
1654 
1655         error = memcmp(image, oldimage, EEPROM_BYTES);
1656         if (error){
1657             printk(KERN_ERR "%s: Error verifying EEPROM image\n",
1658                    dev->name);
1659             error = -EFAULT;
1660         }
1661     wf_out:
1662         kfree(oldimage);
1663         kfree(image);
1664         return error;
1665 
1666     case SIOCRRID:
1667         return put_user(0x52523032, (int __user *)data);
1668     default:
1669         return error;
1670     }
1671 }
1672 
1673 static const struct pci_device_id rr_pci_tbl[] = {
1674     { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
1675         PCI_ANY_ID, PCI_ANY_ID, },
1676     { 0,}
1677 };
1678 MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
1679 
1680 static struct pci_driver rr_driver = {
1681     .name       = "rrunner",
1682     .id_table   = rr_pci_tbl,
1683     .probe      = rr_init_one,
1684     .remove     = rr_remove_one,
1685 };
1686 
1687 module_pci_driver(rr_driver);