Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  *  linux/drivers/message/fusion/mptlan.c
0003  *      IP Over Fibre Channel device driver.
0004  *      For use with LSI Fibre Channel PCI chip/adapters
0005  *      running LSI Fusion MPT (Message Passing Technology) firmware.
0006  *
0007  *  Copyright (c) 2000-2008 LSI Corporation
0008  *  (mailto:DL-MPTFusionLinux@lsi.com)
0009  *
0010  */
0011 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0012 /*
0013     This program is free software; you can redistribute it and/or modify
0014     it under the terms of the GNU General Public License as published by
0015     the Free Software Foundation; version 2 of the License.
0016 
0017     This program is distributed in the hope that it will be useful,
0018     but WITHOUT ANY WARRANTY; without even the implied warranty of
0019     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0020     GNU General Public License for more details.
0021 
0022     NO WARRANTY
0023     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
0024     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
0025     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
0026     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
0027     solely responsible for determining the appropriateness of using and
0028     distributing the Program and assumes all risks associated with its
0029     exercise of rights under this Agreement, including but not limited to
0030     the risks and costs of program errors, damage to or loss of data,
0031     programs or equipment, and unavailability or interruption of operations.
0032 
0033     DISCLAIMER OF LIABILITY
0034     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
0035     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
0036     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
0037     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
0038     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0039     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
0040     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
0041 
0042     You should have received a copy of the GNU General Public License
0043     along with this program; if not, write to the Free Software
0044     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
0045 */
0046 
0047 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0048 /*
0049  * Define statements used for debugging
0050  */
0051 //#define MPT_LAN_IO_DEBUG
0052 
0053 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0054 
0055 #include "mptlan.h"
0056 #include <linux/init.h>
0057 #include <linux/module.h>
0058 #include <linux/fs.h>
0059 #include <linux/sched.h>
0060 #include <linux/slab.h>
0061 
0062 #define my_VERSION  MPT_LINUX_VERSION_COMMON
0063 #define MYNAM       "mptlan"
0064 
0065 MODULE_LICENSE("GPL");
0066 MODULE_VERSION(my_VERSION);
0067 
0068 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0069 /*
0070  * MPT LAN message sizes without variable part.
0071  */
0072 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
0073     (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
0074 
0075 /*
0076  *  Fusion MPT LAN private structures
0077  */
0078 
/*
 * Book-keeping for one skb handed to the IOC: one entry per outstanding
 * send request or posted receive bucket.
 */
struct BufferControl {
    struct sk_buff  *skb;   /* the buffer itself (NULL when slot is idle) */
    dma_addr_t  dma;    /* its streaming DMA mapping */
    unsigned int    len;    /* mapped length in bytes */
};
0084 
struct mpt_lan_priv {
    MPT_ADAPTER *mpt_dev;   /* Owning MPT adapter */
    u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

    atomic_t buckets_out;       /* number of unused buckets on IOC */
    int bucketthresh;       /* Send more when this many left */

    int *mpt_txfidx; /* Free Tx Context list */
    int mpt_txfidx_tail;    /* index of last free Tx entry (-1 = none free) */
    spinlock_t txfidx_lock; /* protects mpt_txfidx / mpt_txfidx_tail */

    int *mpt_rxfidx; /* Free Rx Context list */
    int mpt_rxfidx_tail;    /* index of last free Rx entry (-1 = none free) */
    spinlock_t rxfidx_lock; /* protects mpt_rxfidx / mpt_rxfidx_tail */

    struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
    struct BufferControl *SendCtl;  /* Send BufferControl structs */

    int max_buckets_out;        /* Max buckets to send to IOC */
    int tx_max_out;         /* IOC's Tx queue len */

    u32 total_posted;       /* buckets posted since load (statistics) */
    u32 total_received;     /* buckets returned by the IOC (statistics) */

    struct delayed_work post_buckets_task;  /* deferred bucket replenish */
    struct net_device *dev;             /* back-pointer to our netdev */
    unsigned long post_buckets_active;  /* bit 0 set while task is queued */
};
0113 
/*
 * Outbound header carried in front of the payload; FC_ALEN-sized
 * addresses suggest Fibre Channel node addresses -- confirm against
 * mpt_lan_type_trans()/the header-building code outside this chunk.
 */
struct mpt_lan_ohdr {
    u16 dtype;          /* destination type field */
    u8  daddr[FC_ALEN];     /* destination address */
    u16 stype;          /* source type field */
    u8  saddr[FC_ALEN];     /* source address */
};
0120 
0121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0122 
0123 /*
0124  *  Forward protos...
0125  */
0126 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
0127                MPT_FRAME_HDR *reply);
0128 static int  mpt_lan_open(struct net_device *dev);
0129 static int  mpt_lan_reset(struct net_device *dev);
0130 static int  mpt_lan_close(struct net_device *dev);
0131 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
0132 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
0133                        int priority);
0134 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
0135 static int  mpt_lan_receive_post_reply(struct net_device *dev,
0136                        LANReceivePostReply_t *pRecvRep);
0137 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
0138 static int  mpt_lan_send_reply(struct net_device *dev,
0139                    LANSendReply_t *pSendRep);
0140 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
0141 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
0142 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
0143                      struct net_device *dev);
0144 
0145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0146 /*
0147  *  Fusion MPT LAN private data
0148  */
/*
 * Context handle for this protocol driver within the MPT layer.
 * MPT_MAX_PROTOCOL_DRIVERS looks like the "not yet registered" sentinel --
 * registration happens outside this chunk, so confirm there.
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

/* Default queue depths; presumably overridable as module parameters --
 * the module_param declarations are not visible in this chunk. */
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
0153 
0154 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0155 /**
0156  *  lan_reply - Handle all data sent from the hardware.
0157  *  @ioc: Pointer to MPT_ADAPTER structure
0158  *  @mf: Pointer to original MPT request frame (NULL if TurboReply)
0159  *  @reply: Pointer to MPT reply frame
0160  *
0161  *  Returns 1 indicating original alloc'd request frame ptr
0162  *  should be freed, or 0 if it shouldn't.
0163  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
    struct net_device *dev = ioc->netdev;
    int FreeReqFrame = 0;

    dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
          IOC_AND_NETDEV_NAMES_s_s(dev)));

//  dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//          mf, reply));

    /*
     * mf == NULL marks a "turbo" reply: the whole reply is packed into
     * the 32-bit value passed via @reply, not a full reply frame.
     */
    if (mf == NULL) {
        u32 tmsg = CAST_PTR_TO_U32(reply);

        dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
                IOC_AND_NETDEV_NAMES_s_s(dev),
                tmsg));

        switch (GET_LAN_FORM(tmsg)) {

        // NOTE!  (Optimization) First case here is now caught in
        //  mptbase.c::mpt_interrupt() routine and callcack here
        //  is now skipped for this case!
#if 0
        case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//          dioprintk((KERN_INFO MYNAM "/lan_reply: "
//                "MessageContext turbo reply received\n"));
            FreeReqFrame = 1;
            break;
#endif

        case LAN_REPLY_FORM_SEND_SINGLE:
//          dioprintk((MYNAM "/lan_reply: "
//                "calling mpt_lan_send_reply (turbo)\n"));

            // Potential BUG here?
            //  FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
            //  If/when mpt_lan_send_turbo would return 1 here,
            //  calling routine (mptbase.c|mpt_interrupt)
            //  would Oops because mf has already been set
            //  to NULL.  So after return from this func,
            //  mpt_interrupt() will attempt to put (NULL) mf ptr
            //  item back onto its adapter FreeQ - Oops!:-(
            //  It's Ok, since mpt_lan_send_turbo() *currently*
            //  always returns 0, but..., just in case:

            (void) mpt_lan_send_turbo(dev, tmsg);
            FreeReqFrame = 0;

            break;

        case LAN_REPLY_FORM_RECEIVE_SINGLE:
//          dioprintk((KERN_INFO MYNAM "@lan_reply: "
//                "rcv-Turbo = %08x\n", tmsg));
            mpt_lan_receive_post_turbo(dev, tmsg);
            break;

        default:
            printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
                "that I don't know what to do with\n");

            /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

            break;
        }

        /* Turbo path: there is no request frame to free (mf is NULL). */
        return FreeReqFrame;
    }

//  msg = (u32 *) reply;
//  dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//        le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//        le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//  dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//        reply->u.hdr.Function));

    /* Full (non-turbo) reply frame: dispatch on the MPI function code. */
    switch (reply->u.hdr.Function) {

    case MPI_FUNCTION_LAN_SEND:
    {
        LANSendReply_t *pSendRep;

        pSendRep = (LANSendReply_t *) reply;
        FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
        break;
    }

    case MPI_FUNCTION_LAN_RECEIVE:
    {
        LANReceivePostReply_t *pRecvRep;

        pRecvRep = (LANReceivePostReply_t *) reply;
        if (pRecvRep->NumberOfContexts) {
            mpt_lan_receive_post_reply(dev, pRecvRep);
            /* Only the final (non-continuation) reply releases the
             * original request frame. */
            if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
                FreeReqFrame = 1;
        } else
            dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
                  "ReceivePostReply received.\n"));
        break;
    }

    case MPI_FUNCTION_LAN_RESET:
        /* Just a default reply. Might want to check it to
         * make sure that everything went ok.
         */
        FreeReqFrame = 1;
        break;

    case MPI_FUNCTION_EVENT_NOTIFICATION:
    case MPI_FUNCTION_EVENT_ACK:
        /*  _EVENT_NOTIFICATION should NOT come down this path any more.
         *  Should be routed to mpt_lan_event_process(), but just in case...
         */
        FreeReqFrame = 1;
        break;

    default:
        printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
            "reply that I don't know what to do with\n");

        /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
        FreeReqFrame = 1;

        break;
    }

    return FreeReqFrame;
}
0294 
0295 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *  mpt_lan_ioc_reset - IOC reset notification callback.
 *  @ioc: adapter being reset
 *  @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or post-reset
 *
 *  Pre-reset: stop the Tx queue and reset the Rx free-index list so every
 *  bucket slot is considered free again (the IOC will have dropped them).
 *  Post-reset: re-post receive buckets and restart the queue.
 *  Always returns 1.
 */
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
    struct net_device *dev = ioc->netdev;
    struct mpt_lan_priv *priv;

    if (dev == NULL)
        return(1);
    else
        priv = netdev_priv(dev);

    dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
            reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
            reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

    /* Not open yet (mpt_lan_open() allocates mpt_rxfidx): nothing to do. */
    if (priv->mpt_rxfidx == NULL)
        return (1);

    if (reset_phase == MPT_IOC_SETUP_RESET) {
        ;
    } else if (reset_phase == MPT_IOC_PRE_RESET) {
        int i;
        unsigned long flags;

        netif_stop_queue(dev);

        dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

        atomic_set(&priv->buckets_out, 0);

        /* Reset Rx Free Tail index and re-populate the queue. */
        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        priv->mpt_rxfidx_tail = -1;
        for (i = 0; i < priv->max_buckets_out; i++)
            priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
    } else {
        /* Post-reset: hand fresh buckets back to the IOC and resume Tx. */
        mpt_lan_post_receive_buckets(priv);
        netif_wake_queue(dev);
    }

    return 1;
}
0339 
0340 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0341 static int
0342 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
0343 {
0344     dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
0345 
0346     switch (le32_to_cpu(pEvReply->Event)) {
0347     case MPI_EVENT_NONE:                /* 00 */
0348     case MPI_EVENT_LOG_DATA:            /* 01 */
0349     case MPI_EVENT_STATE_CHANGE:            /* 02 */
0350     case MPI_EVENT_UNIT_ATTENTION:          /* 03 */
0351     case MPI_EVENT_IOC_BUS_RESET:           /* 04 */
0352     case MPI_EVENT_EXT_BUS_RESET:           /* 05 */
0353     case MPI_EVENT_RESCAN:              /* 06 */
0354         /* Ok, do we need to do anything here? As far as
0355            I can tell, this is when a new device gets added
0356            to the loop. */
0357     case MPI_EVENT_LINK_STATUS_CHANGE:      /* 07 */
0358     case MPI_EVENT_LOOP_STATE_CHANGE:       /* 08 */
0359     case MPI_EVENT_LOGOUT:              /* 09 */
0360     case MPI_EVENT_EVENT_CHANGE:            /* 0A */
0361     default:
0362         break;
0363     }
0364 
0365     /*
0366      *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
0367      *  Do NOT do it here now!
0368      */
0369 
0370     return 1;
0371 }
0372 
0373 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *  mpt_lan_open - net_device open (ifup) handler.
 *  @dev: the interface being brought up
 *
 *  Resets the LAN port on the IOC, allocates the Tx/Rx free-context index
 *  arrays and their BufferControl tables, posts the initial receive
 *  buckets, registers for MPT event notifications, and starts the Tx
 *  queue.  Returns 0 on success or -ENOMEM on allocation failure (earlier
 *  allocations are unwound via the goto chain at the bottom).
 */
static int
mpt_lan_open(struct net_device *dev)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    int i;

    /* A failed port reset is deliberately non-fatal here: we log and
     * carry on with the allocations regardless. */
    if (mpt_lan_reset(dev) != 0) {
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;

        printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

        if (mpt_dev->active)
            printk ("The ioc is active. Perhaps it needs to be"
                " reset?\n");
        else
            printk ("The ioc in inactive, most likely in the "
                "process of being reset. Please try again in "
                "a moment.\n");
    }

    priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
                     GFP_KERNEL);
    if (priv->mpt_txfidx == NULL)
        goto out;
    priv->mpt_txfidx_tail = -1;

    priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
                GFP_KERNEL);
    if (priv->SendCtl == NULL)
        goto out_mpt_txfidx;
    /* Mark every Tx context index as free. */
    for (i = 0; i < priv->tx_max_out; i++)
        priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

    dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

    priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
                     GFP_KERNEL);
    if (priv->mpt_rxfidx == NULL)
        goto out_SendCtl;
    priv->mpt_rxfidx_tail = -1;

    priv->RcvCtl = kcalloc(priv->max_buckets_out,
                   sizeof(struct BufferControl),
                   GFP_KERNEL);
    if (priv->RcvCtl == NULL)
        goto out_mpt_rxfidx;
    /* Mark every Rx bucket index as free. */
    for (i = 0; i < priv->max_buckets_out; i++)
        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/    for (i = 0; i < priv->tx_max_out; i++)
/**/        dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/    dlprintk(("\n"));

    dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

    mpt_lan_post_receive_buckets(priv);
    printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
            IOC_AND_NETDEV_NAMES_s_s(dev));

    /* Event registration failure is logged but not treated as fatal. */
    if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
        printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
            " Notifications. This is a bad thing! We're not going "
            "to go ahead, but I'd be leery of system stability at "
            "this point.\n");
    }

    netif_start_queue(dev);
    dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

    return 0;
out_mpt_rxfidx:
    kfree(priv->mpt_rxfidx);
    priv->mpt_rxfidx = NULL;
out_SendCtl:
    kfree(priv->SendCtl);
    priv->SendCtl = NULL;
out_mpt_txfidx:
    kfree(priv->mpt_txfidx);
    priv->mpt_txfidx = NULL;
out:    return -ENOMEM;
}
0456 
0457 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0458 /* Send a LanReset message to the FW. This should result in the FW returning
0459    any buckets it still has. */
0460 static int
0461 mpt_lan_reset(struct net_device *dev)
0462 {
0463     MPT_FRAME_HDR *mf;
0464     LANResetRequest_t *pResetReq;
0465     struct mpt_lan_priv *priv = netdev_priv(dev);
0466 
0467     mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
0468 
0469     if (mf == NULL) {
0470 /*      dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
0471         "Unable to allocate a request frame.\n"));
0472 */
0473         return -1;
0474     }
0475 
0476     pResetReq = (LANResetRequest_t *) mf;
0477 
0478     pResetReq->Function = MPI_FUNCTION_LAN_RESET;
0479     pResetReq->ChainOffset  = 0;
0480     pResetReq->Reserved = 0;
0481     pResetReq->PortNumber   = priv->pnum;
0482     pResetReq->MsgFlags = 0;
0483     pResetReq->Reserved2    = 0;
0484 
0485     mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
0486 
0487     return 0;
0488 }
0489 
0490 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *  mpt_lan_close - net_device stop (ifdown) handler.
 *  @dev: the interface being taken down
 *
 *  Deregisters event handling, stops the queue, asks the FW to return its
 *  buckets via mpt_lan_reset(), waits up to 2 seconds for them to drain,
 *  then unmaps and frees any skbs still outstanding in both directions
 *  along with the context/BufferControl arrays.  Always returns 0.
 */
static int
mpt_lan_close(struct net_device *dev)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    unsigned long timeout;
    int i;

    dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

    mpt_event_deregister(LanCtx);

    dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
          "since driver was loaded, %d still out\n",
          priv->total_posted,atomic_read(&priv->buckets_out)));

    netif_stop_queue(dev);

    mpt_lan_reset(dev);

    /* Give the FW up to 2 seconds to hand back outstanding buckets. */
    timeout = jiffies + 2 * HZ;
    while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
        schedule_timeout_interruptible(1);

    /* Reclaim any receive buckets the FW never returned. */
    for (i = 0; i < priv->max_buckets_out; i++) {
        if (priv->RcvCtl[i].skb != NULL) {
/**/            dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/                  "is still out\n", i));
            dma_unmap_single(&mpt_dev->pcidev->dev,
                     priv->RcvCtl[i].dma,
                     priv->RcvCtl[i].len, DMA_FROM_DEVICE);
            dev_kfree_skb(priv->RcvCtl[i].skb);
        }
    }

    kfree(priv->RcvCtl);
    kfree(priv->mpt_rxfidx);

    /* Reclaim any transmit buffers still mapped for the IOC. */
    for (i = 0; i < priv->tx_max_out; i++) {
        if (priv->SendCtl[i].skb != NULL) {
            dma_unmap_single(&mpt_dev->pcidev->dev,
                     priv->SendCtl[i].dma,
                     priv->SendCtl[i].len, DMA_TO_DEVICE);
            dev_kfree_skb(priv->SendCtl[i].skb);
        }
    }

    kfree(priv->SendCtl);
    kfree(priv->mpt_txfidx);

    atomic_set(&priv->buckets_out, 0);

    printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
            IOC_AND_NETDEV_NAMES_s_s(dev));

    return 0;
}
0548 
0549 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0550 /* Tx timeout handler. */
0551 static void
0552 mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
0553 {
0554     struct mpt_lan_priv *priv = netdev_priv(dev);
0555     MPT_ADAPTER *mpt_dev = priv->mpt_dev;
0556 
0557     if (mpt_dev->active) {
0558         dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
0559         netif_wake_queue(dev);
0560     }
0561 }
0562 
0563 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0564 //static inline int
/**
 *  mpt_lan_send_turbo - complete one transmit from a turbo reply.
 *  @dev: our net device
 *  @tmsg: 32-bit turbo reply containing the buffer context
 *
 *  Updates Tx statistics, unmaps and frees the sent skb, returns the
 *  context index to the free list and wakes the queue.  Always returns 0
 *  (see the "Potential BUG" note in lan_reply() for why that matters).
 */
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    struct sk_buff *sent;
    unsigned long flags;
    u32 ctx;

    ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
    sent = priv->SendCtl[ctx].skb;

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += sent->len;

    dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            __func__, sent));

    /* Release the slot before freeing: unmap the DMA buffer and drop
     * the skb (irq-safe variant; we may be in interrupt context). */
    priv->SendCtl[ctx].skb = NULL;
    dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
             priv->SendCtl[ctx].len, DMA_TO_DEVICE);
    dev_kfree_skb_irq(sent);

    /* Return the context index to the Tx free list. */
    spin_lock_irqsave(&priv->txfidx_lock, flags);
    priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
    spin_unlock_irqrestore(&priv->txfidx_lock, flags);

    netif_wake_queue(dev);
    return 0;
}
0596 
0597 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *  mpt_lan_send_reply - complete one or more transmits from a full reply.
 *  @dev: our net device
 *  @pSendRep: the LANSend reply frame listing completed buffer contexts
 *
 *  Classifies the IOCStatus, then walks the context list: for each entry
 *  it updates stats, unmaps and frees the skb, and returns the context
 *  index to the Tx free list.  Returns 1 if the caller should free the
 *  original request frame (i.e. this is not a continuation reply).
 */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    struct sk_buff *sent;
    unsigned long flags;
    int FreeReqFrame = 0;
    u32 *pContext;
    u32 ctx;
    u8 count;

    count = pSendRep->NumberOfContexts;

    dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
         le16_to_cpu(pSendRep->IOCStatus)));

    /* Add check for Loginfo Flag in IOCStatus */

    switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
    case MPI_IOCSTATUS_SUCCESS:
        dev->stats.tx_packets += count;
        break;

    case MPI_IOCSTATUS_LAN_CANCELED:
    case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
        /* Canceled/aborted sends: no stats change, but the skbs are
         * still reclaimed below. */
        break;

    case MPI_IOCSTATUS_INVALID_SGL:
        dev->stats.tx_errors += count;
        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
                IOC_AND_NETDEV_NAMES_s_s(dev));
        /* NOTE(review): this goto skips the reclaim loop, so the skbs
         * and Tx context slots for these sends are never returned --
         * looks like they stay leaked; confirm intent. */
        goto out;

    default:
        dev->stats.tx_errors += count;
        break;
    }

    pContext = &pSendRep->BufferContext;

    /* Reclaim each completed context under the Tx free-list lock. */
    spin_lock_irqsave(&priv->txfidx_lock, flags);
    while (count > 0) {
        ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

        sent = priv->SendCtl[ctx].skb;
        dev->stats.tx_bytes += sent->len;

        dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
                IOC_AND_NETDEV_NAMES_s_s(dev),
                __func__, sent));

        priv->SendCtl[ctx].skb = NULL;
        dma_unmap_single(&mpt_dev->pcidev->dev,
                 priv->SendCtl[ctx].dma,
                 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
        dev_kfree_skb_irq(sent);

        priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

        pContext++;
        count--;
    }
    spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
    /* Only a final (non-continuation) reply frees the request frame. */
    if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
        FreeReqFrame = 1;

    netif_wake_queue(dev);
    return FreeReqFrame;
}
0670 
0671 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *  mpt_lan_sdu_send - ndo_start_xmit handler: hand one skb to the IOC.
 *  @skb: packet to transmit
 *  @dev: our net device
 *
 *  Grabs a free Tx context and a request frame, DMA-maps the payload,
 *  builds a LANSend request (transaction context element carrying the
 *  context index and destination address, plus one 64-bit simple SGE)
 *  and posts it.  Returns NETDEV_TX_OK on success; NETDEV_TX_BUSY (with
 *  the queue stopped) when no context or request frame is available.
 */
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    MPT_FRAME_HDR *mf;
    LANSendRequest_t *pSendReq;
    SGETransaction32_t *pTrans;
    SGESimple64_t *pSimple;
    const unsigned char *mac;
    dma_addr_t dma;
    unsigned long flags;
    int ctx;
    u16 cur_naa = 0x1000;

    dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
            __func__, skb));

    spin_lock_irqsave(&priv->txfidx_lock, flags);
    if (priv->mpt_txfidx_tail < 0) {
        /* No free Tx contexts: stop the queue and ask the stack to
         * requeue the skb. */
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

        printk (KERN_ERR "%s: no tx context available: %u\n",
            __func__, priv->mpt_txfidx_tail);
        return NETDEV_TX_BUSY;
    }

    mf = mpt_get_msg_frame(LanCtx, mpt_dev);
    if (mf == NULL) {
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

        printk (KERN_ERR "%s: Unable to alloc request frame\n",
            __func__);
        return NETDEV_TX_BUSY;
    }

    /* Claim a context index; the lock only protects the free list. */
    ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
    spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//  dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//          IOC_AND_NETDEV_NAMES_s_s(dev)));

    pSendReq = (LANSendRequest_t *) mf;

    /* Set the mac.raw pointer, since this apparently isn't getting
     * done before we get the skb. Pull the data pointer past the mac data.
     */
    skb_reset_mac_header(skb);
    skb_pull(skb, 12);

    /* NOTE(review): the mapping result is never checked with
     * dma_mapping_error(); on a mapping failure a bad address would be
     * handed to the IOC -- confirm whether this legacy driver should
     * gain that check. */
    dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
                 DMA_TO_DEVICE);

    priv->SendCtl[ctx].skb = skb;
    priv->SendCtl[ctx].dma = dma;
    priv->SendCtl[ctx].len = skb->len;

    /* Message Header */
    pSendReq->Reserved    = 0;
    pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
    pSendReq->ChainOffset = 0;
    pSendReq->Reserved2   = 0;
    pSendReq->MsgFlags    = 0;
    pSendReq->PortNumber  = priv->pnum;

    /* Transaction Context Element */
    pTrans = (SGETransaction32_t *) pSendReq->SG_List;

    /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
    pTrans->ContextSize   = sizeof(u32);
    pTrans->DetailsLength = 2 * sizeof(u32);
    pTrans->Flags         = 0;
    pTrans->TransactionContext = cpu_to_le32(ctx);

//  dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//          IOC_AND_NETDEV_NAMES_s_s(dev),
//          ctx, skb, skb->data));

    mac = skb_mac_header(skb);

    /* Pack the NAA and 6-byte destination MAC into the two detail words. */
    pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
                            (mac[0] <<  8) |
                            (mac[1] <<  0));
    pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
                            (mac[3] << 16) |
                            (mac[4] <<  8) |
                            (mac[5] <<  0));

    pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

    /* If we ever decide to send more than one Simple SGE per LANSend, then
       we will need to make sure that LAST_ELEMENT only gets set on the
       last one. Otherwise, bad voodoo and evil funkiness will commence. */
    pSimple->FlagsLength = cpu_to_le32(
            ((MPI_SGE_FLAGS_LAST_ELEMENT |
              MPI_SGE_FLAGS_END_OF_BUFFER |
              MPI_SGE_FLAGS_SIMPLE_ELEMENT |
              MPI_SGE_FLAGS_SYSTEM_ADDRESS |
              MPI_SGE_FLAGS_HOST_TO_IOC |
              MPI_SGE_FLAGS_64_BIT_ADDRESSING |
              MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
            skb->len);
    pSimple->Address.Low = cpu_to_le32((u32) dma);
    if (sizeof(dma_addr_t) > sizeof(u32))
        pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
    else
        pSimple->Address.High = 0;

    mpt_put_msg_frame (LanCtx, mpt_dev, mf);
    netif_trans_update(dev);

    dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            le32_to_cpu(pSimple->FlagsLength)));

    return NETDEV_TX_OK;
}
0791 
0792 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0793 static void
0794 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
0795 /*
0796  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
0797  */
0798 {
0799     struct mpt_lan_priv *priv = netdev_priv(dev);
0800     
0801     if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
0802         if (priority) {
0803             schedule_delayed_work(&priv->post_buckets_task, 0);
0804         } else {
0805             schedule_delayed_work(&priv->post_buckets_task, 1);
0806             dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
0807                    "timer.\n"));
0808         }
0809             dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
0810                IOC_AND_NETDEV_NAMES_s_s(dev) ));
0811     }
0812 }
0813 
0814 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0815 static int
0816 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
0817 {
0818     struct mpt_lan_priv *priv = netdev_priv(dev);
0819 
0820     skb->protocol = mpt_lan_type_trans(skb, dev);
0821 
0822     dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
0823          "delivered to upper level.\n",
0824             IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
0825 
0826     dev->stats.rx_bytes += skb->len;
0827     dev->stats.rx_packets++;
0828 
0829     skb->dev = dev;
0830     netif_rx(skb);
0831 
0832     dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
0833          atomic_read(&priv->buckets_out)));
0834 
0835     if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
0836         mpt_lan_wake_post_buckets_task(dev, 1);
0837 
0838     dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
0839           "remaining, %d received back since sod\n",
0840           atomic_read(&priv->buckets_out), priv->total_received));
0841 
0842     return 0;
0843 }
0844 
0845 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0846 //static inline int
/**
 *  mpt_lan_receive_post_turbo - handle one received bucket (turbo reply).
 *  @dev: our net device
 *  @tmsg: 32-bit turbo reply containing bucket context and packet length
 *
 *  Small packets (< MPT_LAN_RX_COPYBREAK) are copied into a freshly
 *  allocated skb so the original bucket skb (and its DMA mapping) can be
 *  reused as-is; larger packets take the bucket skb itself and unmap it.
 *  Either way the context index is returned to the Rx free list and the
 *  skb is passed up via mpt_lan_receive_skb().  Returns that function's
 *  result, or -ENOMEM if the copybreak allocation fails.
 */
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    struct sk_buff *skb, *old_skb;
    unsigned long flags;
    u32 ctx, len;

    ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
    skb = priv->RcvCtl[ctx].skb;

    len = GET_LAN_PACKET_LENGTH(tmsg);

    if (len < MPT_LAN_RX_COPYBREAK) {
        old_skb = skb;

        skb = (struct sk_buff *)dev_alloc_skb(len);
        if (!skb) {
            printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                    IOC_AND_NETDEV_NAMES_s_s(dev),
                    __FILE__, __LINE__);
            return -ENOMEM;
        }

        /* Sync the bucket for CPU access, copy the payload out, then
         * give it back to the device -- the bucket skb stays mapped
         * and its slot is reused. */
        dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
                    priv->RcvCtl[ctx].dma,
                    priv->RcvCtl[ctx].len,
                    DMA_FROM_DEVICE);

        skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

        dma_sync_single_for_device(&mpt_dev->pcidev->dev,
                       priv->RcvCtl[ctx].dma,
                       priv->RcvCtl[ctx].len,
                       DMA_FROM_DEVICE);
        goto out;
    }

    /* Large packet: take ownership of the bucket skb and unmap it. */
    skb_put(skb, len);

    priv->RcvCtl[ctx].skb = NULL;

    dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
             priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);

out:
    /* Return the bucket index to the Rx free list. */
    spin_lock_irqsave(&priv->rxfidx_lock, flags);
    priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
    spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

    atomic_dec(&priv->buckets_out);
    priv->total_received++;

    return mpt_lan_receive_skb(dev, skb);
}
0903 
0904 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Free the receive buckets listed in a ReceivePostReply.
 *
 * Called (from mpt_lan_receive_post_reply) when the IOC reports
 * MPI_IOCSTATUS_LAN_CANCELED: each listed bucket is unmapped, its skb
 * freed, and its context index returned to the rx free-index stack.
 * Always returns 0.
 */
static int
mpt_lan_receive_post_free(struct net_device *dev,
              LANReceivePostReply_t *pRecvRep)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    unsigned long flags;
    struct sk_buff *skb;
    u32 ctx;
    int count;
    int i;

    count = pRecvRep->NumberOfContexts;

/**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
          "IOC returned %d buckets, freeing them...\n", count));

    spin_lock_irqsave(&priv->rxfidx_lock, flags);
    for (i = 0; i < count; i++) {
        ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

        skb = priv->RcvCtl[ctx].skb;

//      dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//              IOC_AND_NETDEV_NAMES_s_s(dev)));
//      dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//              priv, &(priv->buckets_out)));
//      dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

        /* Tear down the bucket: drop the skb reference, unmap its DMA
         * buffer, free it, and recycle the context index. */
        priv->RcvCtl[ctx].skb = NULL;
        dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
                 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);

        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
    }
    spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

    atomic_sub(count, &priv->buckets_out);

//  for (i = 0; i < priv->max_buckets_out; i++)
//      if (priv->RcvCtl[i].skb != NULL)
//          dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//                "is still out\n", i));

/*  dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
          count));
*/
/**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/          "remaining, %d received back since sod.\n",
/**/          atomic_read(&priv->buckets_out), priv->total_received));
    return 0;
}
0958 
0959 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handle a full (non-turbo) LANReceivePostReply from the IOC.
 *
 * Three delivery paths, depending on the reply:
 *   - multiple contexts (count > 1): one packet spans several buckets;
 *     a new skb is allocated and the fragments are copied into it,
 *     with each bucket synced for CPU access around its copy and then
 *     handed back to the device.  Bucket skbs stay mapped for reuse.
 *   - single context, short packet (< MPT_LAN_RX_COPYBREAK): copy into
 *     a new skb, recycle the bucket in place (sync-for-cpu / copy /
 *     sync-for-device, as in the turbo path).
 *   - single context, large packet: unmap the bucket skb and hand it
 *     up the stack directly.
 *
 * A LAN_CANCELED status is delegated to mpt_lan_receive_post_free().
 * Also sanity-checks the free-index stack and, if the driver's
 * buckets_out count has drifted too far from the firmware's
 * BucketsRemaining, resets the LAN port to resynchronize.
 *
 * Returns the result of mpt_lan_receive_skb(), -ENOMEM on allocation
 * failure, or -1 on protocol errors.
 */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
               LANReceivePostReply_t *pRecvRep)
{
    struct mpt_lan_priv *priv = netdev_priv(dev);
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    struct sk_buff *skb, *old_skb;
    unsigned long flags;
    u32 len, ctx, offset;
    u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
    int count;
    int i, l;

    dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
    dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
         le16_to_cpu(pRecvRep->IOCStatus)));

    /* Canceled buckets are simply freed, not delivered. */
    if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
                        MPI_IOCSTATUS_LAN_CANCELED)
        return mpt_lan_receive_post_free(dev, pRecvRep);

    len = le32_to_cpu(pRecvRep->PacketLength);
    if (len == 0) {
        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
            "ReceivePostReply w/ PacketLength zero!\n",
                IOC_AND_NETDEV_NAMES_s_s(dev));
        printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
                pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
        return -1;
    }

    ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
    count  = pRecvRep->NumberOfContexts;
    skb    = priv->RcvCtl[ctx].skb;

    offset = le32_to_cpu(pRecvRep->PacketOffset);
//  if (offset != 0) {
//      printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//          "w/ PacketOffset %u\n",
//              IOC_AND_NETDEV_NAMES_s_s(dev),
//              offset);
//  }

    dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            offset, len));

    if (count > 1) {
        /* Packet spans multiple buckets: gather-copy into one skb. */
        int szrem = len;

//      dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//          "for single packet, concatenating...\n",
//              IOC_AND_NETDEV_NAMES_s_s(dev)));

        skb = (struct sk_buff *)dev_alloc_skb(len);
        if (!skb) {
            printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                    IOC_AND_NETDEV_NAMES_s_s(dev),
                    __FILE__, __LINE__);
            return -ENOMEM;
        }

        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        for (i = 0; i < count; i++) {

            ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
            old_skb = priv->RcvCtl[ctx].skb;

            /* Last fragment may be shorter than a full bucket. */
            l = priv->RcvCtl[ctx].len;
            if (szrem < l)
                l = szrem;

//          dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//                  IOC_AND_NETDEV_NAMES_s_s(dev),
//                  i, l));

            dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
                        priv->RcvCtl[ctx].dma,
                        priv->RcvCtl[ctx].len,
                        DMA_FROM_DEVICE);
            skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

            dma_sync_single_for_device(&mpt_dev->pcidev->dev,
                           priv->RcvCtl[ctx].dma,
                           priv->RcvCtl[ctx].len,
                           DMA_FROM_DEVICE);

            priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
            szrem -= l;
        }
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

    } else if (len < MPT_LAN_RX_COPYBREAK) {

        /* Short packet: copy out, keep the mapped bucket for reuse. */
        old_skb = skb;

        skb = (struct sk_buff *)dev_alloc_skb(len);
        if (!skb) {
            printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                    IOC_AND_NETDEV_NAMES_s_s(dev),
                    __FILE__, __LINE__);
            return -ENOMEM;
        }

        dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
                    priv->RcvCtl[ctx].dma,
                    priv->RcvCtl[ctx].len,
                    DMA_FROM_DEVICE);

        skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

        dma_sync_single_for_device(&mpt_dev->pcidev->dev,
                       priv->RcvCtl[ctx].dma,
                       priv->RcvCtl[ctx].len,
                       DMA_FROM_DEVICE);

        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

    } else {
        /* Large packet: deliver the bucket skb itself. */
        spin_lock_irqsave(&priv->rxfidx_lock, flags);

        priv->RcvCtl[ctx].skb = NULL;

        dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
                 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
        priv->RcvCtl[ctx].dma = 0;

        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        skb_put(skb,len);
    }

    atomic_sub(count, &priv->buckets_out);
    priv->total_received += count;

    /* Free-index stack overflow would mean duplicated contexts. */
    if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
        printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
            "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
                IOC_AND_NETDEV_NAMES_s_s(dev),
                priv->mpt_rxfidx_tail,
                MPT_LAN_MAX_BUCKETS_OUT);

        return -1;
    }

    if (remaining == 0)
        printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
            "(priv->buckets_out = %d)\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            atomic_read(&priv->buckets_out));
    else if (remaining < 10)
        printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
            "(priv->buckets_out = %d)\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            remaining, atomic_read(&priv->buckets_out));

    /* If our count and the firmware's disagree by too much, the fw's
     * hashtable is likely stale (e.g. after CRC errors); reset the LAN
     * port and repost buckets to resynchronize. */
    if ((remaining < priv->bucketthresh) &&
        ((atomic_read(&priv->buckets_out) - remaining) >
         MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

        printk (KERN_WARNING MYNAM " Mismatch between driver's "
            "buckets_out count and fw's BucketsRemaining "
            "count has crossed the threshold, issuing a "
            "LanReset to clear the fw's hashtable. You may "
            "want to check your /var/log/messages for \"CRC "
            "error\" event notifications.\n");

        mpt_lan_reset(dev);
        mpt_lan_wake_post_buckets_task(dev, 0);
    }

    return mpt_lan_receive_skb(dev, skb);
}
1136 
1137 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1138 /* Simple SGE's only at the moment */
1139 
/* Post empty receive buckets to the IOC, up to max_buckets_out.
 *
 * For each request frame, packs as many transaction/SGE pairs as fit in
 * the frame (at most 'max').  Each bucket pulls a context index from the
 * rx free-index stack; its skb is reused if one of the right length is
 * already cached, otherwise allocated and DMA-mapped fresh.  Runs until
 * the deficit is covered or resources (frames, contexts, skbs) run out.
 *
 * Called from the post_buckets_task workqueue; clears
 * priv->post_buckets_active on exit so the task can be queued again.
 */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
    struct net_device *dev = priv->dev;
    MPT_ADAPTER *mpt_dev = priv->mpt_dev;
    MPT_FRAME_HDR *mf;
    LANReceivePostRequest_t *pRecvReq;
    SGETransaction32_t *pTrans;
    SGESimple64_t *pSimple;
    struct sk_buff *skb;
    dma_addr_t dma;
    u32 curr, buckets, count, max;
    u32 len = (dev->mtu + dev->hard_header_len + 4);
    unsigned long flags;
    int i;

    /* How many buckets do we need to post to reach the target? */
    curr = atomic_read(&priv->buckets_out);
    buckets = (priv->max_buckets_out - curr);

    dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
            IOC_AND_NETDEV_NAMES_s_s(dev),
            __func__, buckets, curr));

    /* Max transaction/SGE pairs that fit in one request frame. */
    max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
            (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));

    while (buckets) {
        mf = mpt_get_msg_frame(LanCtx, mpt_dev);
        if (mf == NULL) {
            printk (KERN_ERR "%s: Unable to alloc request frame\n",
                __func__);
            dioprintk((KERN_ERR "%s: %u buckets remaining\n",
                 __func__, buckets));
            goto out;
        }
        pRecvReq = (LANReceivePostRequest_t *) mf;

        i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
        mpt_dev->RequestNB[i] = 0;
        count = buckets;
        if (count > max)
            count = max;

        pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
        pRecvReq->ChainOffset = 0;
        pRecvReq->MsgFlags    = 0;
        pRecvReq->PortNumber  = priv->pnum;

        pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
        pSimple = NULL;

        for (i = 0; i < count; i++) {
            int ctx;

            /* Pop a free context index (stack protected by
             * rxfidx_lock; empty stack means no free contexts). */
            spin_lock_irqsave(&priv->rxfidx_lock, flags);
            if (priv->mpt_rxfidx_tail < 0) {
                printk (KERN_ERR "%s: Can't alloc context\n",
                    __func__);
                spin_unlock_irqrestore(&priv->rxfidx_lock,
                               flags);
                break;
            }

            ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

            /* Cached skb with a stale length (e.g. after an MTU
             * change) must be unmapped and freed first. */
            skb = priv->RcvCtl[ctx].skb;
            if (skb && (priv->RcvCtl[ctx].len != len)) {
                dma_unmap_single(&mpt_dev->pcidev->dev,
                         priv->RcvCtl[ctx].dma,
                         priv->RcvCtl[ctx].len,
                         DMA_FROM_DEVICE);
                dev_kfree_skb(priv->RcvCtl[ctx].skb);
                skb = priv->RcvCtl[ctx].skb = NULL;
            }

            if (skb == NULL) {
                skb = dev_alloc_skb(len);
                if (skb == NULL) {
                    printk (KERN_WARNING
                        MYNAM "/%s: Can't alloc skb\n",
                        __func__);
                    /* Put the context back before bailing. */
                    priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                    spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
                    break;
                }

                dma = dma_map_single(&mpt_dev->pcidev->dev,
                             skb->data, len,
                             DMA_FROM_DEVICE);

                priv->RcvCtl[ctx].skb = skb;
                priv->RcvCtl[ctx].dma = dma;
                priv->RcvCtl[ctx].len = len;
            }

            spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

            /* Emit one transaction (context) + one 64-bit simple
             * SGE (bucket buffer) into the request frame. */
            pTrans->ContextSize   = sizeof(u32);
            pTrans->DetailsLength = 0;
            pTrans->Flags         = 0;
            pTrans->TransactionContext = cpu_to_le32(ctx);

            pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

            pSimple->FlagsLength = cpu_to_le32(
                ((MPI_SGE_FLAGS_END_OF_BUFFER |
                  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
            pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
            if (sizeof(dma_addr_t) > sizeof(u32))
                pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
            else
                pSimple->Address.High = 0;

            pTrans = (SGETransaction32_t *) (pSimple + 1);
        }

        if (pSimple == NULL) {
/**/            printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/                __func__);
            mpt_free_msg_frame(mpt_dev, mf);
            goto out;
        }

        /* Mark the last SGE as end-of-list before sending. */
        pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

        pRecvReq->BucketCount = cpu_to_le32(i);

/*  printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *  for (i = 0; i < j + 2; i ++)
 *      printk (" %08x", le32_to_cpu(msg[i]));
 *  printk ("\n");
 */

        mpt_put_msg_frame(LanCtx, mpt_dev, mf);

        priv->total_posted += i;
        buckets -= i;
        atomic_add(i, &priv->buckets_out);
    }

out:
    dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
          __func__, buckets, atomic_read(&priv->buckets_out)));
    dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
    __func__, priv->total_posted, priv->total_received));

    clear_bit(0, &priv->post_buckets_active);
}
1289 
1290 static void
1291 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1292 {
1293     mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1294                           post_buckets_task.work));
1295 }
1296 
/* Net-device callbacks for the MPT LAN interface. */
static const struct net_device_ops mpt_netdev_ops = {
    .ndo_open       = mpt_lan_open,
    .ndo_stop       = mpt_lan_close,
    .ndo_start_xmit = mpt_lan_sdu_send,
    .ndo_tx_timeout = mpt_lan_tx_timeout,
};
1303 
1304 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Allocate, initialize, and register a Fibre Channel net_device for
 * LAN port 'pnum' of adapter 'mpt_dev'.
 *
 * Sets up the private state (bucket accounting, locks, delayed work),
 * derives the MAC address from the adapter's prefetched LANPage1
 * hardware address (byte-reversed), and caps max_buckets_out at the
 * firmware's MaxLanBuckets.
 *
 * Returns the registered net_device, or NULL on allocation or
 * registration failure.
 */
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
    struct net_device *dev;
    struct mpt_lan_priv *priv;
    u8 HWaddr[FC_ALEN], *a;

    dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
    if (!dev)
        return NULL;

    dev->mtu = MPT_LAN_MTU;

    priv = netdev_priv(dev);

    priv->dev = dev;
    priv->mpt_dev = mpt_dev;
    priv->pnum = pnum;

    INIT_DELAYED_WORK(&priv->post_buckets_task,
              mpt_lan_post_receive_buckets_work);
    priv->post_buckets_active = 0;

    dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
            __LINE__, dev->mtu + dev->hard_header_len + 4));

    atomic_set(&priv->buckets_out, 0);
    priv->total_posted = 0;
    priv->total_received = 0;
    /* Never post more buckets than the firmware supports. */
    priv->max_buckets_out = max_buckets_out;
    if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
        priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

    dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
            __LINE__,
            mpt_dev->pfacts[0].MaxLanBuckets,
            max_buckets_out,
            priv->max_buckets_out));

    /* Repost threshold: two-thirds of the maximum outstanding. */
    priv->bucketthresh = priv->max_buckets_out * 2 / 3;
    spin_lock_init(&priv->txfidx_lock);
    spin_lock_init(&priv->rxfidx_lock);

    /*  Grab pre-fetched LANPage1 stuff. :-) */
    a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

    /* Hardware address is stored low-byte-first; reverse into HWaddr. */
    HWaddr[0] = a[5];
    HWaddr[1] = a[4];
    HWaddr[2] = a[3];
    HWaddr[3] = a[2];
    HWaddr[4] = a[1];
    HWaddr[5] = a[0];

    dev->addr_len = FC_ALEN;
    dev_addr_set(dev, HWaddr);
    memset(dev->broadcast, 0xff, FC_ALEN);

    /* The Tx queue is 127 deep on the 909.
     * Give ourselves some breathing room.
     */
    priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
                tx_max_out_p : MPT_TX_MAX_OUT_LIM;

    dev->netdev_ops = &mpt_netdev_ops;
    dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

    /* MTU range: 96 - 65280 */
    dev->min_mtu = MPT_LAN_MIN_MTU;
    dev->max_mtu = MPT_LAN_MAX_MTU;

    dlprintk((KERN_INFO MYNAM ": Finished registering dev "
        "and setting initial values\n"));

    if (register_netdev(dev) != 0) {
        free_netdev(dev);
        dev = NULL;
    }
    return dev;
}
1384 
/* Probe callback: scan the adapter's ports for LAN capability and
 * register a net_device for the first LAN-capable port found.
 *
 * Note: registers at most ONE device per adapter (ioc->netdev holds a
 * single pointer); the function returns 0 as soon as one port is
 * registered.  Returns -ENODEV if no LAN-capable port could be set up.
 */
static int
mptlan_probe(struct pci_dev *pdev)
{
    MPT_ADAPTER         *ioc = pci_get_drvdata(pdev);
    struct net_device   *dev;
    int         i;

    for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
        printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
               "ProtocolFlags=%02Xh (%c%c%c%c)\n",
               ioc->name, ioc->pfacts[i].PortNumber,
               ioc->pfacts[i].ProtocolFlags,
               MPT_PROTOCOL_FLAGS_c_c_c_c(
                   ioc->pfacts[i].ProtocolFlags));

        /* Skip ports without the LAN protocol enabled. */
        if (!(ioc->pfacts[i].ProtocolFlags &
                    MPI_PORTFACTS_PROTOCOL_LAN)) {
            printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
                   "seems to be disabled on this adapter port!\n",
                   ioc->name);
            continue;
        }

        dev = mpt_register_lan_device(ioc, i);
        if (!dev) {
            printk(KERN_ERR MYNAM ": %s: Unable to register "
                   "port%d as a LAN device\n", ioc->name,
                   ioc->pfacts[i].PortNumber);
            continue;
        }

        printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
               "registered as '%s'\n", ioc->name, dev->name);
        printk(KERN_INFO MYNAM ": %s/%s: "
               "LanAddr = %pM\n",
               IOC_AND_NETDEV_NAMES_s_s(dev),
               dev->dev_addr);

        ioc->netdev = dev;

        return 0;
    }

    return -ENODEV;
}
1430 
1431 static void
1432 mptlan_remove(struct pci_dev *pdev)
1433 {
1434     MPT_ADAPTER         *ioc = pci_get_drvdata(pdev);
1435     struct net_device   *dev = ioc->netdev;
1436 
1437     if(dev != NULL) {
1438         unregister_netdev(dev);
1439         free_netdev(dev);
1440     }
1441 }
1442 
/* Hooks registered with the MPT base driver for device add/remove. */
static struct mpt_pci_driver mptlan_driver = {
    .probe      = mptlan_probe,
    .remove     = mptlan_remove,
};
1447 
1448 static int __init mpt_lan_init (void)
1449 {
1450     show_mptmod_ver(LANAME, LANVER);
1451 
1452     LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
1453                 "lan_reply");
1454     if (LanCtx <= 0) {
1455         printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1456         return -EBUSY;
1457     }
1458 
1459     dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1460 
1461     if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1462         printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1463                "handler with mptbase! The world is at an end! "
1464                "Everything is fading to black! Goodbye.\n");
1465         return -EBUSY;
1466     }
1467 
1468     dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1469     
1470     mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1471     return 0;
1472 }
1473 
1474 static void __exit mpt_lan_exit(void)
1475 {
1476     mpt_device_driver_deregister(MPTLAN_DRIVER);
1477     mpt_reset_deregister(LanCtx);
1478 
1479     if (LanCtx) {
1480         mpt_deregister(LanCtx);
1481         LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1482     }
1483 }
1484 
/* Standard module entry/exit registration. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1487 
1488 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Determine the protocol type and packet class of a received frame.
 *
 * Strips the mpt_lan_ohdr pseudo-header, works around a firmware bug
 * where broadcast frames arrive with each 32-bit word byte-swapped
 * (detected via dtype == 0xffff and repaired in place), classifies the
 * packet (broadcast/multicast/otherhost/host) from the destination
 * address, and strips the LLC/SNAP header for IP and ARP frames.
 *
 * Returns the protocol id in network byte order (the SNAP ethertype
 * for IP/ARP, otherwise ETH_P_802_2).
 */
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
    struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
    struct fcllc *fcllc;

    skb_reset_mac_header(skb);
    skb_pull(skb, sizeof(struct mpt_lan_ohdr));

    if (fch->dtype == htons(0xffff)) {
        u32 *p = (u32 *) fch;

        /* Byte-swap the first four words of the header in place to
         * undo the firmware's broadcast swap bug. */
        swab32s(p + 0);
        swab32s(p + 1);
        swab32s(p + 2);
        swab32s(p + 3);

        printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
                NETDEV_PTR_TO_IOC_NAME_s(dev));
        printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
                fch->saddr);
    }

    /* Low bit of the first destination byte marks a group address. */
    if (*fch->daddr & 1) {
        if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
            skb->pkt_type = PACKET_BROADCAST;
        } else {
            skb->pkt_type = PACKET_MULTICAST;
        }
    } else {
        if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
            skb->pkt_type = PACKET_OTHERHOST;
        } else {
            skb->pkt_type = PACKET_HOST;
        }
    }

    fcllc = (struct fcllc *)skb->data;

    /* Strip the SNAP header from ARP packets since we don't
     * pass them through to the 802.2/SNAP layers.
     */
    if (fcllc->dsap == EXTENDED_SAP &&
        (fcllc->ethertype == htons(ETH_P_IP) ||
         fcllc->ethertype == htons(ETH_P_ARP))) {
        skb_pull(skb, sizeof(struct fcllc));
        return fcllc->ethertype;
    }

    return htons(ETH_P_802_2);
}
1540 
1541 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/