0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  Adaptec AAC series RAID controller driver
0004  *  (c) Copyright 2001 Red Hat Inc.
0005  *
0006  * based on the old aacraid driver that is..
0007  * Adaptec aacraid device driver for Linux.
0008  *
0009  * Copyright (c) 2000-2010 Adaptec, Inc.
0010  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
0011  *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
0012  *
0013  * Module Name:
0014  *  commsup.c
0015  *
0016  * Abstract: Contains all routines that are required for FSA host/adapter
0017  *    communication.
0018  */
0019 
0020 #include <linux/kernel.h>
0021 #include <linux/init.h>
0022 #include <linux/crash_dump.h>
0023 #include <linux/types.h>
0024 #include <linux/sched.h>
0025 #include <linux/pci.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/slab.h>
0028 #include <linux/completion.h>
0029 #include <linux/blkdev.h>
0030 #include <linux/delay.h>
0031 #include <linux/kthread.h>
0032 #include <linux/interrupt.h>
0033 #include <linux/bcd.h>
0034 #include <scsi/scsi.h>
0035 #include <scsi/scsi_host.h>
0036 #include <scsi/scsi_device.h>
0037 #include <scsi/scsi_cmnd.h>
0038 
0039 #include "aacraid.h"
0040 
0041 /**
0042  *  fib_map_alloc       -   allocate the fib objects
0043  *  @dev: Adapter to allocate for
0044  *
0045  *  Allocate and map the shared PCI space for the FIB blocks used to
0046  *  talk to the Adaptec firmware.
0047  */
0048 
0049 static int fib_map_alloc(struct aac_dev *dev)
0050 {
0051     if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
0052         dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
0053     else
0054         dev->max_cmd_size = dev->max_fib_size;
0060 
0061     dprintk((KERN_INFO
0062       "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
0063       &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
0064       AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
0065     dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
0066         (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
0067         * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
0068         &dev->hw_fib_pa, GFP_KERNEL);
0069     if (dev->hw_fib_va == NULL)
0070         return -ENOMEM;
0071     return 0;
0072 }
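
/*
 * Layout note: the coherent buffer holds one slot per fib (can_queue +
 * AAC_NUM_MGT_FIB of them); each slot is max_cmd_size bytes plus an
 * aac_fib_xporthdr, and ALIGN32 - 1 bytes of slack are added so that
 * aac_fib_setup() can round the base up to a 32-byte boundary for PMC
 * hardware.
 */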
0073 
0074 /**
0075  *  aac_fib_map_free        -   free the fib objects
0076  *  @dev: Adapter to free
0077  *
0078  *  Free the PCI mappings and the memory allocated for FIB blocks
0079  *  on this adapter.
0080  */
0081 
0082 void aac_fib_map_free(struct aac_dev *dev)
0083 {
0084     size_t alloc_size;
0085     size_t fib_size;
0086     int num_fibs;
0087 
0088     if (!dev->hw_fib_va || !dev->max_cmd_size)
0089         return;
0090 
0091     num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
0092     fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
0093     alloc_size = fib_size * num_fibs + ALIGN32 - 1;
0094 
0095     dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
0096               dev->hw_fib_pa);
0097 
0098     dev->hw_fib_va = NULL;
0099     dev->hw_fib_pa = 0;
0100 }
0101 
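
/**
 *  aac_fib_vector_assign   -   assign interrupt vectors to fibs
 *  @dev: Adapter whose fib pool is updated
 *
 *  Distribute MSI-X vector numbers 1 .. (max_msix - 1) round-robin
 *  across the fib pool. Vector 0 is reserved for the last vector_cap
 *  fibs and for adapters that have only a single MSI-X vector.
 */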
0102 void aac_fib_vector_assign(struct aac_dev *dev)
0103 {
0104     u32 i = 0;
0105     u32 vector = 1;
0106     struct fib *fibptr = NULL;
0107 
0108     for (i = 0, fibptr = &dev->fibs[i];
0109         i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
0110         i++, fibptr++) {
0111         if ((dev->max_msix == 1) ||
0112           (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
0113             - dev->vector_cap))) {
0114             fibptr->vector_no = 0;
0115         } else {
0116             fibptr->vector_no = vector;
0117             vector++;
0118             if (vector == dev->max_msix)
0119                 vector = 1;
0120         }
0121     }
0122 }
0123 
0124 /**
0125  *  aac_fib_setup   -   setup the fibs
0126  *  @dev: Adapter to set up
0127  *
0128  *  Allocate the PCI space for the fibs, map it and then initialise the
0129  *  fib area, the unmapped fib data and also the free list
0130  */
0131 
0132 int aac_fib_setup(struct aac_dev * dev)
0133 {
0134     struct fib *fibptr;
0135     struct hw_fib *hw_fib;
0136     dma_addr_t hw_fib_pa;
0137     int i;
0138     u32 max_cmds;
0139 
0140     while (((i = fib_map_alloc(dev)) == -ENOMEM)
0141      && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
0142         max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
0143         dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
0144         if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
0145             dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
0146     }
0147     if (i < 0)
0148         return -ENOMEM;
0149 
0150     memset(dev->hw_fib_va, 0,
0151         (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
0152         (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
0153 
0154     /* 32 byte alignment for PMC */
0155     hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
0156     hw_fib    = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
0157                     (hw_fib_pa - dev->hw_fib_pa));
0158 
0159     /* add Xport header */
0160     hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
0161         sizeof(struct aac_fib_xporthdr));
0162     hw_fib_pa += sizeof(struct aac_fib_xporthdr);
0163 
0164     /*
0165      *  Initialise the fibs
0166      */
0167     for (i = 0, fibptr = &dev->fibs[i];
0168         i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
0169         i++, fibptr++)
0170     {
0171         fibptr->flags = 0;
0172         fibptr->size = sizeof(struct fib);
0173         fibptr->dev = dev;
0174         fibptr->hw_fib_va = hw_fib;
0175         fibptr->data = (void *) fibptr->hw_fib_va->data;
0176         fibptr->next = fibptr+1;    /* Forward chain the fibs */
0177         init_completion(&fibptr->event_wait);
0178         spin_lock_init(&fibptr->event_lock);
0179         hw_fib->header.XferState = cpu_to_le32(0xffffffff);
0180         hw_fib->header.SenderSize =
0181             cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
0182         fibptr->hw_fib_pa = hw_fib_pa;
0183         fibptr->hw_sgl_pa = hw_fib_pa +
0184             offsetof(struct aac_hba_cmd_req, sge[2]);
0185         /*
0186          * one element is for the ptr to the separate sg list,
0187          * second element for 32 byte alignment
0188          */
0189         fibptr->hw_error_pa = hw_fib_pa +
0190             offsetof(struct aac_native_hba, resp.resp_bytes[0]);
0191 
0192         hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
0193             dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
0194         hw_fib_pa = hw_fib_pa +
0195             dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
0196     }
0197 
0198     /*
0199      *  Assign vector numbers to fibs
0200      */
0201     aac_fib_vector_assign(dev);
0202 
0203     /*
0204      *  Add the fib chain to the free list
0205      */
0206     dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
0207     /*
0208     *   Set 8 fibs aside for management tools
0209     */
0210     dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
0211     return 0;
0212 }
0213 
0214 /**
0215  *  aac_fib_alloc_tag   -   allocate a fib using tags
0216  *  @dev: Adapter to allocate the fib for
0217  *  @scmd: SCSI command
0218  *
0219  *  Allocate a fib from the adapter fib pool using tags
0220  *  from the blk layer.
0221  */
0222 
0223 struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
0224 {
0225     struct fib *fibptr;
0226 
0227     fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
0228     /*
0229      *  Null out fields that depend on being zero at the start of
0230      *  each I/O
0231      */
0232     fibptr->hw_fib_va->header.XferState = 0;
0233     fibptr->type = FSAFS_NTC_FIB_CONTEXT;
0234     fibptr->callback_data = NULL;
0235     fibptr->callback = NULL;
0236     fibptr->flags = 0;
0237 
0238     return fibptr;
0239 }
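
/*
 * Illustrative sketch only (not part of this file): a queuecommand
 * path would typically map the block-layer tag straight to a fib:
 *
 *  struct fib *fib = aac_fib_alloc_tag(dev, scmd);
 *
 *  aac_fib_init(fib);
 *  ... build the request in fib->hw_fib_va ...
 *
 * The tag-indexed lookup needs no locking because blk-mq guarantees a
 * tag is owned by exactly one in-flight command.
 */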
0240 
0241 /**
0242  *  aac_fib_alloc   -   allocate a fib
0243  *  @dev: Adapter to allocate the fib for
0244  *
0245  *  Allocate a fib from the adapter fib pool. If the pool is empty we
0246  *  return NULL.
0247  */
0248 
0249 struct fib *aac_fib_alloc(struct aac_dev *dev)
0250 {
0251     struct fib * fibptr;
0252     unsigned long flags;
0253     spin_lock_irqsave(&dev->fib_lock, flags);
0254     fibptr = dev->free_fib;
0255     if (!fibptr) {
0256         spin_unlock_irqrestore(&dev->fib_lock, flags);
0257         return fibptr;
0258     }
0259     dev->free_fib = fibptr->next;
0260     spin_unlock_irqrestore(&dev->fib_lock, flags);
0261     /*
0262      *  Set the proper node type code and node byte size
0263      */
0264     fibptr->type = FSAFS_NTC_FIB_CONTEXT;
0265     fibptr->size = sizeof(struct fib);
0266     /*
0267      *  Null out fields that depend on being zero at the start of
0268      *  each I/O
0269      */
0270     fibptr->hw_fib_va->header.XferState = 0;
0271     fibptr->flags = 0;
0272     fibptr->callback = NULL;
0273     fibptr->callback_data = NULL;
0274 
0275     return fibptr;
0276 }
0277 
0278 /**
0279  *  aac_fib_free    -   free a fib
0280  *  @fibptr: fib to free up
0281  *
0282  *  Frees up a fib and places it on the appropriate queue
0283  */
0284 
0285 void aac_fib_free(struct fib *fibptr)
0286 {
0287     unsigned long flags;
0288 
0289     if (fibptr->done == 2)
0290         return;
0291 
0292     spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
0293     if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
0294         aac_config.fib_timeouts++;
0295     if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
0296         fibptr->hw_fib_va->header.XferState != 0) {
0297         printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
0298              (void*)fibptr,
0299              le32_to_cpu(fibptr->hw_fib_va->header.XferState));
0300     }
0301     fibptr->next = fibptr->dev->free_fib;
0302     fibptr->dev->free_fib = fibptr;
0303     spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
0304 }
0305 
0306 /**
0307  *  aac_fib_init    -   initialise a fib
0308  *  @fibptr: The fib to initialize
0309  *
0310  *  Set up the generic fib fields ready for use
0311  */
0312 
0313 void aac_fib_init(struct fib *fibptr)
0314 {
0315     struct hw_fib *hw_fib = fibptr->hw_fib_va;
0316 
0317     memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
0318     hw_fib->header.StructType = FIB_MAGIC;
0319     hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
0320     hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
0321     hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
0322     hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
0323 }
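
/*
 * Typical fib lifecycle, shown only as an illustrative sketch using
 * names defined elsewhere in this driver:
 *
 *  fibptr = aac_fib_alloc(dev);
 *  if (!fibptr)
 *      return -ENOMEM;
 *  aac_fib_init(fibptr);
 *  ... fill fib_data(fibptr) with the request ...
 *  status = aac_fib_send(command, fibptr, size, FsaNormal,
 *              1, 1, NULL, NULL);      wait=1, reply=1: synchronous
 *  aac_fib_complete(fibptr);
 *  aac_fib_free(fibptr);
 */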
0324 
0325 /**
0326  *  fib_dealloc     -   deallocate a fib
0327  *  @fibptr: fib to deallocate
0328  *
0329  *  Will deallocate and return to the free pool the FIB pointed to by the
0330  *  caller.
0331  */
0332 
0333 static void fib_dealloc(struct fib * fibptr)
0334 {
0335     struct hw_fib *hw_fib = fibptr->hw_fib_va;
0336     hw_fib->header.XferState = 0;
0337 }
0338 
0339 /*
0340  *  Communication primitives define and support the queuing method we use
0341  *  to support host to adapter communication. All queue accesses happen
0342  *  through these routines and they are the only routines which have
0343  *  knowledge of how these queues are implemented.
0344  */
0345 
0346 /**
0347  *  aac_get_entry       -   get a queue entry
0348  *  @dev: Adapter
0349  *  @qid: Queue Number
0350  *  @entry: Entry return
0351  *  @index: Index return
0352  *  @nonotify: notification control
0353  *
0354  *  With a priority the routine returns a queue entry if the queue has free entries. If the queue
0355  *  is full (no free entries) then no entry is returned and the function returns 0; otherwise 1 is
0356  *  returned.
0357  */
0358 
0359 static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
0360 {
0361     struct aac_queue * q;
0362     unsigned long idx;
0363 
0364     /*
0365      *  All of the queues wrap when they reach the end, so we check
0366      *  to see if they have reached the end and if they have we just
0367      *  set the index back to zero. This is a wrap. You could OR off
0368      *  the high bits in all updates but this is a bit faster I think.
0369      */
0370 
0371     q = &dev->queues->queue[qid];
0372 
0373     idx = *index = le32_to_cpu(*(q->headers.producer));
0374     /* Interrupt Moderation, only interrupt for first two entries */
0375     if (idx != le32_to_cpu(*(q->headers.consumer))) {
0376         if (--idx == 0) {
0377             if (qid == AdapNormCmdQueue)
0378                 idx = ADAP_NORM_CMD_ENTRIES;
0379             else
0380                 idx = ADAP_NORM_RESP_ENTRIES;
0381         }
0382         if (idx != le32_to_cpu(*(q->headers.consumer)))
0383             *nonotify = 1;
0384     }
0385 
0386     if (qid == AdapNormCmdQueue) {
0387         if (*index >= ADAP_NORM_CMD_ENTRIES)
0388             *index = 0; /* Wrap to front of the Producer Queue. */
0389     } else {
0390         if (*index >= ADAP_NORM_RESP_ENTRIES)
0391             *index = 0; /* Wrap to front of the Producer Queue. */
0392     }
0393 
0394     /* Queue is full */
0395     if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
0396         printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
0397                 qid, atomic_read(&q->numpending));
0398         return 0;
0399     } else {
0400         *entry = q->base + *index;
0401         return 1;
0402     }
0403 }
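
/*
 * Example (illustrative): with the consumer index at 5, a producer
 * index of 4 makes (*index + 1) equal to the consumer, meaning the
 * slot about to be written still holds an unconsumed entry, so the
 * queue is reported full. Any other producer value returns the entry
 * at q->base + *index.
 */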
0404 
0405 /**
0406  *  aac_queue_get       -   get the next free QE
0407  *  @dev: Adapter
0408  *  @index: Returned index
0409  *  @qid: Queue number
0410  *  @hw_fib: Fib to associate with the queue entry
0411  *  @wait: Wait if queue full
0412  *  @fibptr: Driver fib object to go with fib
0413  *  @nonotify: Don't notify the adapter
0414  *
0415  *  Gets the next free QE off the requested priority adapter command
0416  *  queue and associates the Fib with the QE. The QE represented by
0417  *  index is ready to insert on the queue when this routine returns
0418  *  success.
0419  */
0420 
0421 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
0422 {
0423     struct aac_entry * entry = NULL;
0424     int map = 0;
0425 
0426     if (qid == AdapNormCmdQueue) {
0427         /* if no entries, wait for some if the caller wants to */
0428         while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
0429             printk(KERN_ERR "GetEntries failed\n");
0430         }
0431         /*
0432          *  Setup queue entry with a command, status and fib mapped
0433          */
0434         entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
0435         map = 1;
0436     } else {
0437         while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
0438             /* if no entries, wait for some if the caller wants to */
0439         }
0440         /*
0441          *  Setup queue entry with command, status and fib mapped
0442          */
0443         entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
0444         entry->addr = hw_fib->header.SenderFibAddress;
0445         /* Restore the adapter's pointer to the FIB */
0446         hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
0447         map = 0;
0448     }
0449     /*
0450      *  If MapFib is true then we need to map the Fib and put pointers
0451      *  in the queue entry.
0452      */
0453     if (map)
0454         entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
0455     return 0;
0456 }
0457 
0458 /*
0459  *  Define the highest level of host to adapter communication routines.
0460  *  These routines will support host to adapter FS communication. These
0461  *  routines have no knowledge of the communication method used. This level
0462  *  sends and receives FIBs. This level has no knowledge of how these FIBs
0463  *  get passed back and forth.
0464  */
0465 
0466 /**
0467  *  aac_fib_send    -   send a fib to the adapter
0468  *  @command: Command to send
0469  *  @fibptr: The fib
0470  *  @size: Size of fib data area
0471  *  @priority: Priority of Fib
0472  *  @wait: Async/sync select
0473  *  @reply: True if a reply is wanted
0474  *  @callback: Called with reply
0475  *  @callback_data: Passed to callback
0476  *
0477  *  Sends the requested FIB to the adapter and optionally will wait for a
0478  *  response FIB. If the caller does not wish to wait for a response, a
0479  *  callback must be supplied; it is invoked when the response FIB is
0480  *  received from the adapter.
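 *
 *  Return: 0 on success, -EINPROGRESS for a successfully queued
 *  asynchronous send, or a negative errno (-EBUSY, -EINVAL, -EMSGSIZE,
 *  -ETIMEDOUT, -EFAULT, -ERESTARTSYS) on failure.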
0481  */
0482 
0483 int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
0484         int priority, int wait, int reply, fib_callback callback,
0485         void *callback_data)
0486 {
0487     struct aac_dev * dev = fibptr->dev;
0488     struct hw_fib * hw_fib = fibptr->hw_fib_va;
0489     unsigned long flags = 0;
0490     unsigned long mflags = 0;
0491     unsigned long sflags = 0;
0492 
0493     if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
0494         return -EBUSY;
0495 
0496     if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
0497         return -EINVAL;
0498 
0499     /*
0500      *  There are 5 cases with the wait and response requested flags.
0501      *  The only invalid cases are if the caller requests to wait and
0502      *  does not request a response and if the caller does not want a
0503      *  response and the Fib is not allocated from pool. If a response
0504      *  is not requested the Fib will just be deallocated by the DPC
0505      *  routine when the response comes back from the adapter. No
0506      *  further processing will be done besides deleting the Fib. We
0507      *  will have a debug mode where the adapter can notify the host
0508      *  it had a problem and the host can log that fact.
0509      */
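    /*
     * Summary of the wait/reply combinations handled below:
     *
     *   wait reply   XferState bits added           counter bumped
     *   ---- -----   ---------------------------    --------------
     *     1     0    (invalid, returns -EINVAL)     -
     *     0     1    Async | ResponseExpected       AsyncSent
     *     0     0    NoResponseExpected             NoResponseSent
     *     1     1    ResponseExpected               NormalSent
     */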
0510     fibptr->flags = 0;
0511     if (wait && !reply) {
0512         return -EINVAL;
0513     } else if (!wait && reply) {
0514         hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
0515         FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
0516     } else if (!wait && !reply) {
0517         hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
0518         FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
0519     } else if (wait && reply) {
0520         hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
0521         FIB_COUNTER_INCREMENT(aac_config.NormalSent);
0522     }
0523     /*
0524      *  Map the fib into 32bits by using the fib number
0525      */
0526 
0527     hw_fib->header.SenderFibAddress =
0528         cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
0529 
0530     /* use the same shifted value for handle to be compatible
0531      * with the new native hba command handle
0532      */
0533     hw_fib->header.Handle =
0534         cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
0535 
0536     /*
0537      *  Set FIB state to indicate where it came from and if we want a
0538      *  response from the adapter. Also load the command from the
0539      *  caller.
0540      *
0541      *  Map the hw fib pointer as a 32bit value
0542      */
0543     hw_fib->header.Command = cpu_to_le16(command);
0544     hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
0545     /*
0546      *  Set the size of the Fib we want to send to the adapter
0547      */
0548     hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
0549     if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
0550         return -EMSGSIZE;
0551     }
0552     /*
0553      *  Get a queue entry, connect the FIB to it and send a notify to
0554      *  the adapter that a command is ready.
0555      */
0556     hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
0557 
0558     /*
0559      *  Fill in the Callback and CallbackContext if we are not
0560      *  going to wait.
0561      */
0562     if (!wait) {
0563         fibptr->callback = callback;
0564         fibptr->callback_data = callback_data;
0565         fibptr->flags = FIB_CONTEXT_FLAG;
0566     }
0567 
0568     fibptr->done = 0;
0569 
0570     FIB_COUNTER_INCREMENT(aac_config.FibsSent);
0571 
0572     dprintk((KERN_DEBUG "Fib contents:.\n"));
0573     dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
0574     dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
0575     dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
0576     dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
0577     dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
0578     dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
0579 
0580     if (!dev->queues)
0581         return -EBUSY;
0582 
0583     if (wait) {
0584 
0585         spin_lock_irqsave(&dev->manage_lock, mflags);
0586         if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
0587             printk(KERN_INFO "No management Fibs Available:%d\n",
0588                         dev->management_fib_count);
0589             spin_unlock_irqrestore(&dev->manage_lock, mflags);
0590             return -EBUSY;
0591         }
0592         dev->management_fib_count++;
0593         spin_unlock_irqrestore(&dev->manage_lock, mflags);
0594         spin_lock_irqsave(&fibptr->event_lock, flags);
0595     }
0596 
0597     if (dev->sync_mode) {
0598         if (wait)
0599             spin_unlock_irqrestore(&fibptr->event_lock, flags);
0600         spin_lock_irqsave(&dev->sync_lock, sflags);
0601         if (dev->sync_fib) {
0602             list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
0603             spin_unlock_irqrestore(&dev->sync_lock, sflags);
0604         } else {
0605             dev->sync_fib = fibptr;
0606             spin_unlock_irqrestore(&dev->sync_lock, sflags);
0607             aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
0608                 (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
0609                 NULL, NULL, NULL, NULL, NULL);
0610         }
0611         if (wait) {
0612             fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
0613             if (wait_for_completion_interruptible(&fibptr->event_wait)) {
0614                 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
0615                 return -EFAULT;
0616             }
0617             return 0;
0618         }
0619         return -EINPROGRESS;
0620     }
0621 
0622     if (aac_adapter_deliver(fibptr) != 0) {
0623         printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
0624         if (wait) {
0625             spin_unlock_irqrestore(&fibptr->event_lock, flags);
0626             spin_lock_irqsave(&dev->manage_lock, mflags);
0627             dev->management_fib_count--;
0628             spin_unlock_irqrestore(&dev->manage_lock, mflags);
0629         }
0630         return -EBUSY;
0631     }
0632 
0633 
0634     /*
0635      *  If the caller wanted us to wait for response wait now.
0636      */
0637 
0638     if (wait) {
0639         spin_unlock_irqrestore(&fibptr->event_lock, flags);
0640         /* Only set for first known interruptible command */
0641         if (wait < 0) {
0642             /*
0643              * *VERY* Dangerous to time out a command, the
0644              * assumption is made that we have no hope of
0645              * functioning because an interrupt routing or other
0646              * hardware failure has occurred.
0647              */
0648             unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
0649             while (!try_wait_for_completion(&fibptr->event_wait)) {
0650                 int blink;
0651                 if (time_is_before_eq_jiffies(timeout)) {
0652                     struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
0653                     atomic_dec(&q->numpending);
0654                     if (wait == -1) {
0655                             printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
0656                           "Usually a result of a PCI interrupt routing problem;\n"
0657                           "update mother board BIOS or consider utilizing one of\n"
0658                           "the SAFE mode kernel options (acpi, apic etc)\n");
0659                     }
0660                     return -ETIMEDOUT;
0661                 }
0662 
0663                 if (unlikely(aac_pci_offline(dev)))
0664                     return -EFAULT;
0665 
0666                 if ((blink = aac_adapter_check_health(dev)) > 0) {
0667                     if (wait == -1) {
0668                             printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
0669                           "Usually a result of a serious unrecoverable hardware problem\n",
0670                           blink);
0671                     }
0672                     return -EFAULT;
0673                 }
0674                 /*
0675                  * Allow other processes / CPUS to use core
0676                  */
0677                 schedule();
0678             }
0679         } else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
0680             /* Do nothing ... satisfy
0681              * wait_for_completion_interruptible must_check */
0682         }
0683 
0684         spin_lock_irqsave(&fibptr->event_lock, flags);
0685         if (fibptr->done == 0) {
0686             fibptr->done = 2; /* Tell interrupt we aborted */
0687             spin_unlock_irqrestore(&fibptr->event_lock, flags);
0688             return -ERESTARTSYS;
0689         }
0690         spin_unlock_irqrestore(&fibptr->event_lock, flags);
0691         BUG_ON(fibptr->done == 0);
0692 
0693         if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
0694             return -ETIMEDOUT;
0695         return 0;
0696     }
0697     /*
0698      *  If the user does not want a response then return success, otherwise
0699      *  return pending
0700      */
0701     if (reply)
0702         return -EINPROGRESS;
0703     else
0704         return 0;
0705 }
0706 
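/**
 *  aac_hba_send    -   send a native HBA command to the adapter
 *  @command: IU type (only HBA_IU_TYPE_SCSI_CMD_REQ is accepted)
 *  @fibptr: The fib wrapping the aac_hba_cmd_req
 *  @callback: Completion callback for an asynchronous send, or NULL
 *  @callback_data: Passed to @callback
 *
 *  Native-HBA analogue of aac_fib_send(). A NULL @callback makes the
 *  call wait interruptibly for completion.
 *
 *  Return: 0 for a completed synchronous send, -EINPROGRESS for an
 *  asynchronous send, or a negative errno on failure.
 */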
0707 int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
0708         void *callback_data)
0709 {
0710     struct aac_dev *dev = fibptr->dev;
0711     int wait;
0712     unsigned long flags = 0;
0713     unsigned long mflags = 0;
0714     struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
0715             fibptr->hw_fib_va;
0716 
0717     fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
0718     if (callback) {
0719         wait = 0;
0720         fibptr->callback = callback;
0721         fibptr->callback_data = callback_data;
0722     } else
0723         wait = 1;
0724 
0725 
0726     hbacmd->iu_type = command;
0727 
0728     if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
0729         /* bit1 of request_id must be 0: (index << 2) + 1 sets bit 0 and leaves bit 1 clear */
0730         hbacmd->request_id =
0731             cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
0732         fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
0733     } else
0734         return -EINVAL;
0735 
0736 
0737     if (wait) {
0738         spin_lock_irqsave(&dev->manage_lock, mflags);
0739         if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
0740             spin_unlock_irqrestore(&dev->manage_lock, mflags);
0741             return -EBUSY;
0742         }
0743         dev->management_fib_count++;
0744         spin_unlock_irqrestore(&dev->manage_lock, mflags);
0745         spin_lock_irqsave(&fibptr->event_lock, flags);
0746     }
0747 
0748     if (aac_adapter_deliver(fibptr) != 0) {
0749         if (wait) {
0750             spin_unlock_irqrestore(&fibptr->event_lock, flags);
0751             spin_lock_irqsave(&dev->manage_lock, mflags);
0752             dev->management_fib_count--;
0753             spin_unlock_irqrestore(&dev->manage_lock, mflags);
0754         }
0755         return -EBUSY;
0756     }
0757     FIB_COUNTER_INCREMENT(aac_config.NativeSent);
0758 
0759     if (wait) {
0760 
0761         spin_unlock_irqrestore(&fibptr->event_lock, flags);
0762 
0763         if (unlikely(aac_pci_offline(dev)))
0764             return -EFAULT;
0765 
0766         fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
0767         if (wait_for_completion_interruptible(&fibptr->event_wait))
0768             fibptr->done = 2;
0769         fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
0770 
0771         spin_lock_irqsave(&fibptr->event_lock, flags);
0772         if ((fibptr->done == 0) || (fibptr->done == 2)) {
0773             fibptr->done = 2; /* Tell interrupt we aborted */
0774             spin_unlock_irqrestore(&fibptr->event_lock, flags);
0775             return -ERESTARTSYS;
0776         }
0777         spin_unlock_irqrestore(&fibptr->event_lock, flags);
0778         WARN_ON(fibptr->done == 0);
0779 
0780         if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
0781             return -ETIMEDOUT;
0782 
0783         return 0;
0784     }
0785 
0786     return -EINPROGRESS;
0787 }
0788 
0789 /**
0790  *  aac_consumer_get    -   get the top of the queue
0791  *  @dev: Adapter
0792  *  @q: Queue
0793  *  @entry: Return entry
0794  *
0795  *  Returns a pointer to the entry at the top of the requested queue
0796  *  that we are a consumer of. It does not change the state of the
0797  *  queue.
0798  */
0799 
0800 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
0801 {
0802     u32 index;
0803     int status;
0804     if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
0805         status = 0;
0806     } else {
0807         /*
0808          *  The consumer index must be wrapped if we have reached
0809          *  the end of the queue, else we just use the entry
0810          *  pointed to by the header index
0811          */
0812         if (le32_to_cpu(*q->headers.consumer) >= q->entries)
0813             index = 0;
0814         else
0815             index = le32_to_cpu(*q->headers.consumer);
0816         *entry = q->base + index;
0817         status = 1;
0818     }
0819     return(status);
0820 }
0821 
0822 /**
0823  *  aac_consumer_free   -   free consumer entry
0824  *  @dev: Adapter
0825  *  @q: Queue
0826  *  @qid: Queue ident
0827  *
0828  *  Frees up the current top of the queue we are a consumer of. If the
0829  *  queue was full notify the producer that the queue is no longer full.
0830  */
0831 
0832 void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
0833 {
0834     int wasfull = 0;
0835     u32 notify;
0836 
0837     if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
0838         wasfull = 1;
0839 
0840     if (le32_to_cpu(*q->headers.consumer) >= q->entries)
0841         *q->headers.consumer = cpu_to_le32(1);
0842     else
0843         le32_add_cpu(q->headers.consumer, 1);
0844 
0845     if (wasfull) {
0846         switch (qid) {
0847 
0848         case HostNormCmdQueue:
0849             notify = HostNormCmdNotFull;
0850             break;
0851         case HostNormRespQueue:
0852             notify = HostNormRespNotFull;
0853             break;
0854         default:
0855             BUG();
0856             return;
0857         }
0858         aac_adapter_notify(dev, notify);
0859     }
0860 }
0861 
0862 /**
0863  *  aac_fib_adapter_complete    -   complete adapter issued fib
0864  *  @fibptr: fib to complete
0865  *  @size: size of fib
0866  *
0867  *  Will do all necessary work to complete a FIB that was sent from
0868  *  the adapter.
0869  */
0870 
0871 int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
0872 {
0873     struct hw_fib * hw_fib = fibptr->hw_fib_va;
0874     struct aac_dev * dev = fibptr->dev;
0875     struct aac_queue * q;
0876     unsigned long nointr = 0;
0877     unsigned long qflags;
0878 
0879     if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
0880         dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
0881         dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
0882         kfree(hw_fib);
0883         return 0;
0884     }
0885 
0886     if (hw_fib->header.XferState == 0) {
0887         if (dev->comm_interface == AAC_COMM_MESSAGE)
0888             kfree(hw_fib);
0889         return 0;
0890     }
0891     /*
0892      *  If we plan to do anything check the structure type first.
0893      */
0894     if (hw_fib->header.StructType != FIB_MAGIC &&
0895         hw_fib->header.StructType != FIB_MAGIC2 &&
0896         hw_fib->header.StructType != FIB_MAGIC2_64) {
0897         if (dev->comm_interface == AAC_COMM_MESSAGE)
0898             kfree(hw_fib);
0899         return -EINVAL;
0900     }
0901     /*
0902      *  This block handles the case where the adapter had sent us a
0903      *  command and we have finished processing the command. We
0904      *  call completeFib when we are done processing the command
0905      *  and want to send a response back to the adapter. This will
0906      *  send the completed cdb to the adapter.
0907      */
0908     if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
0909         if (dev->comm_interface == AAC_COMM_MESSAGE) {
0910             kfree(hw_fib);
0911         } else {
0912             u32 index;
0913             hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
0914             if (size) {
0915                 size += sizeof(struct aac_fibhdr);
0916                 if (size > le16_to_cpu(hw_fib->header.SenderSize))
0917                     return -EMSGSIZE;
0918                 hw_fib->header.Size = cpu_to_le16(size);
0919             }
0920             q = &dev->queues->queue[AdapNormRespQueue];
0921             spin_lock_irqsave(q->lock, qflags);
0922             aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
0923             *(q->headers.producer) = cpu_to_le32(index + 1);
0924             spin_unlock_irqrestore(q->lock, qflags);
0925             if (!(nointr & (int)aac_config.irq_mod))
0926                 aac_adapter_notify(dev, AdapNormRespQueue);
0927         }
0928     } else {
0929         printk(KERN_WARNING "aac_fib_adapter_complete: "
0930             "Unknown xferstate detected.\n");
0931         BUG();
0932     }
0933     return 0;
0934 }
0935 
0936 /**
0937  *  aac_fib_complete    -   fib completion handler
0938  *  @fibptr: FIB to complete
0939  *
0940  *  Will do all necessary work to complete a FIB.
0941  */
0942 
0943 int aac_fib_complete(struct fib *fibptr)
0944 {
0945     struct hw_fib * hw_fib = fibptr->hw_fib_va;
0946 
0947     if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
0948         fib_dealloc(fibptr);
0949         return 0;
0950     }
0951 
0952     /*
0953      *  Check for a fib which has already been completed or with a
0954      *  status wait timeout
0955      */
0956 
0957     if (hw_fib->header.XferState == 0 || fibptr->done == 2)
0958         return 0;
0959     /*
0960      *  If we plan to do anything check the structure type first.
0961      */
0962 
0963     if (hw_fib->header.StructType != FIB_MAGIC &&
0964         hw_fib->header.StructType != FIB_MAGIC2 &&
0965         hw_fib->header.StructType != FIB_MAGIC2_64)
0966         return -EINVAL;
0967     /*
0968      *  This block completes a cdb which originated on the host and we
0969      *  just need to deallocate the cdb or reinit it. At this point the
0970      *  command is complete that we had sent to the adapter and this
0971      *  cdb could be reused.
0972      */
0973 
0974     if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
0975         (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
0976     {
0977         fib_dealloc(fibptr);
0978     }
0979     else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
0980     {
0981         /*
0982          *  This handles the case when the host has aborted the I/O
0983          *  to the adapter because the adapter is not responding
0984          */
0985         fib_dealloc(fibptr);
0986     } else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
0987         fib_dealloc(fibptr);
0988     } else {
0989         BUG();
0990     }
0991     return 0;
0992 }
0993 
0994 /**
0995  *  aac_printf  -   handle printf from firmware
0996  *  @dev: Adapter
0997  *  @val: Message info
0998  *
0999  *  Print a message passed to us by the controller firmware on the
1000  *  Adaptec board
1001  */
1002 
1003 void aac_printf(struct aac_dev *dev, u32 val)
1004 {
1005     char *cp = dev->printfbuf;
1006     if (dev->printf_enabled)
1007     {
1008         int length = val & 0xffff;
1009         int level = (val >> 16) & 0xffff;
1010 
1011         /*
1012          *  The size of the printfbuf is set in port.c.
1013          *  There is no variable or define for it.
1014          */
1015         if (length > 255)
1016             length = 255;
1017         if (cp[length] != 0)
1018             cp[length] = 0;
1019         if (level == LOG_AAC_HIGH_ERROR)
1020             printk(KERN_WARNING "%s:%s", dev->name, cp);
1021         else
1022             printk(KERN_INFO "%s:%s", dev->name, cp);
1023     }
1024     memset(cp, 0, 256);
1025 }
1026 
1027 static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
1028 {
1029     return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
1030 }
1031 
1032 
1033 static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
1034 {
1035     switch (aac_aif_data(aifcmd, 1)) {
1036     case AifBuCacheDataLoss:
1037         if (aac_aif_data(aifcmd, 2))
1038             dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
1039             aac_aif_data(aifcmd, 2));
1040         else
1041             dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
1042         break;
1043     case AifBuCacheDataRecover:
1044         if (aac_aif_data(aifcmd, 2))
1045             dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
1046             aac_aif_data(aifcmd, 2));
1047         else
1048             dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
1049         break;
1050     }
1051 }
1052 
1053 #define AIF_SNIFF_TIMEOUT   (500*HZ)
1054 /**
1055  *  aac_handle_aif      -   Handle a message from the firmware
1056  *  @dev: Which adapter this fib is from
1057  *  @fibptr: Pointer to fibptr from adapter
1058  *
1059  *  This routine handles a driver notify fib from the adapter and
1060  *  dispatches it to the appropriate routine for handling.
1061  */
1062 static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1063 {
1064     struct hw_fib * hw_fib = fibptr->hw_fib_va;
1065     struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
1066     u32 channel, id, lun, container;
1067     struct scsi_device *device;
1068     enum {
1069         NOTHING,
1070         DELETE,
1071         ADD,
1072         CHANGE
1073     } device_config_needed = NOTHING;
1074 
1075     /* Sniff for container changes */
1076 
1077     if (!dev || !dev->fsa_dev)
1078         return;
1079     container = channel = id = lun = (u32)-1;
1080 
1081     /*
1082      *  We have set this up to try and minimize the number of
1083      * re-configures that take place. As a result of this when
1084      * certain AIF's come in we will set a flag waiting for another
1085      * type of AIF before setting the re-config flag.
1086      */
1087     switch (le32_to_cpu(aifcmd->command)) {
1088     case AifCmdDriverNotify:
1089         switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1090         case AifRawDeviceRemove:
1091             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
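            /*
             * The 32-bit value packs a SCSI address: bits 24-27 are
             * the channel, bits 16-23 the LUN and bits 0-15 the
             * target id; bits 28-31 are reserved and must be zero.
             */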
1092             if ((container >> 28)) {
1093                 container = (u32)-1;
1094                 break;
1095             }
1096             channel = (container >> 24) & 0xF;
1097             if (channel >= dev->maximum_num_channels) {
1098                 container = (u32)-1;
1099                 break;
1100             }
1101             id = container & 0xFFFF;
1102             if (id >= dev->maximum_num_physicals) {
1103                 container = (u32)-1;
1104                 break;
1105             }
1106             lun = (container >> 16) & 0xFF;
1107             container = (u32)-1;
1108             channel = aac_phys_to_logical(channel);
1109             device_config_needed = DELETE;
1110             break;
1111 
1112         /*
1113          *  Morph or Expand complete
1114          */
1115         case AifDenMorphComplete:
1116         case AifDenVolumeExtendComplete:
1117             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1118             if (container >= dev->maximum_num_containers)
1119                 break;
1120 
1121             /*
1122              *  Find the scsi_device associated with the SCSI
1123              * address. Make sure we have the right array, and if
1124              * so set the flag to initiate a new re-config once we
1125              * see an AifEnConfigChange AIF come through.
1126              */
1127 
1128             if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
1129                 device = scsi_device_lookup(dev->scsi_host_ptr,
1130                     CONTAINER_TO_CHANNEL(container),
1131                     CONTAINER_TO_ID(container),
1132                     CONTAINER_TO_LUN(container));
1133                 if (device) {
1134                     dev->fsa_dev[container].config_needed = CHANGE;
1135                     dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
1136                     dev->fsa_dev[container].config_waiting_stamp = jiffies;
1137                     scsi_device_put(device);
1138                 }
1139             }
1140         }
1141 
1142         /*
1143          *  If we are waiting on something and this happens to be
1144          * that thing then set the re-configure flag.
1145          */
1146         if (container != (u32)-1) {
1147             if (container >= dev->maximum_num_containers)
1148                 break;
1149             if ((dev->fsa_dev[container].config_waiting_on ==
1150                 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1151              time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1152                 dev->fsa_dev[container].config_waiting_on = 0;
1153         } else for (container = 0;
1154             container < dev->maximum_num_containers; ++container) {
1155             if ((dev->fsa_dev[container].config_waiting_on ==
1156                 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1157              time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1158                 dev->fsa_dev[container].config_waiting_on = 0;
1159         }
1160         break;
1161 
1162     case AifCmdEventNotify:
1163         switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
1164         case AifEnBatteryEvent:
1165             dev->cache_protected =
1166                 (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
1167             break;
1168         /*
1169          *  Add an Array.
1170          */
1171         case AifEnAddContainer:
1172             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1173             if (container >= dev->maximum_num_containers)
1174                 break;
1175             dev->fsa_dev[container].config_needed = ADD;
1176             dev->fsa_dev[container].config_waiting_on =
1177                 AifEnConfigChange;
1178             dev->fsa_dev[container].config_waiting_stamp = jiffies;
1179             break;
1180 
1181         /*
1182          *  Delete an Array.
1183          */
1184         case AifEnDeleteContainer:
1185             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1186             if (container >= dev->maximum_num_containers)
1187                 break;
1188             dev->fsa_dev[container].config_needed = DELETE;
1189             dev->fsa_dev[container].config_waiting_on =
1190                 AifEnConfigChange;
1191             dev->fsa_dev[container].config_waiting_stamp = jiffies;
1192             break;
1193 
1194         /*
1195          *  Container change detected. If we currently are not
1196          * waiting on something else, setup to wait on a Config Change.
1197          */
1198         case AifEnContainerChange:
1199             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1200             if (container >= dev->maximum_num_containers)
1201                 break;
1202             if (dev->fsa_dev[container].config_waiting_on &&
1203              time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1204                 break;
1205             dev->fsa_dev[container].config_needed = CHANGE;
1206             dev->fsa_dev[container].config_waiting_on =
1207                 AifEnConfigChange;
1208             dev->fsa_dev[container].config_waiting_stamp = jiffies;
1209             break;
1210 
1211         case AifEnConfigChange:
1212             break;
1213 
1214         case AifEnAddJBOD:
1215         case AifEnDeleteJBOD:
1216             container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
1217             if ((container >> 28)) {
1218                 container = (u32)-1;
1219                 break;
1220             }
1221             channel = (container >> 24) & 0xF;
1222             if (channel >= dev->maximum_num_channels) {
1223                 container = (u32)-1;
1224                 break;
1225             }
1226             id = container & 0xFFFF;
1227             if (id >= dev->maximum_num_physicals) {
1228                 container = (u32)-1;
1229                 break;
1230             }
1231             lun = (container >> 16) & 0xFF;
1232             container = (u32)-1;
1233             channel = aac_phys_to_logical(channel);
1234             device_config_needed =
1235               (((__le32 *)aifcmd->data)[0] ==
1236                 cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
1237             if (device_config_needed == ADD) {
1238                 device = scsi_device_lookup(dev->scsi_host_ptr,
1239                     channel,
1240                     id,
1241                     lun);
1242                 if (device) {
1243                     scsi_remove_device(device);
1244                     scsi_device_put(device);
1245                 }
1246             }
1247             break;
1248 
1249         case AifEnEnclosureManagement:
1250             /*
1251              * If in JBOD mode, automatic exposure of new
1252              * physical target to be suppressed until configured.
1253              */
1254             if (dev->jbod)
1255                 break;
1256             switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
1257             case EM_DRIVE_INSERTION:
1258             case EM_DRIVE_REMOVAL:
1259             case EM_SES_DRIVE_INSERTION:
1260             case EM_SES_DRIVE_REMOVAL:
1261                 container = le32_to_cpu(
1262                     ((__le32 *)aifcmd->data)[2]);
1263                 if ((container >> 28)) {
1264                     container = (u32)-1;
1265                     break;
1266                 }
1267                 channel = (container >> 24) & 0xF;
1268                 if (channel >= dev->maximum_num_channels) {
1269                     container = (u32)-1;
1270                     break;
1271                 }
1272                 id = container & 0xFFFF;
1273                 lun = (container >> 16) & 0xFF;
1274                 container = (u32)-1;
1275                 if (id >= dev->maximum_num_physicals) {
1276                     /* legacy dev_t ? */
1277                     if ((0x2000 <= id) || lun || channel ||
1278                       ((channel = (id >> 7) & 0x3F) >=
1279                       dev->maximum_num_channels))
1280                         break;
1281                     lun = (id >> 4) & 7;
1282                     id &= 0xF;
1283                 }
1284                 channel = aac_phys_to_logical(channel);
1285                 device_config_needed =
1286                   ((((__le32 *)aifcmd->data)[3]
1287                     == cpu_to_le32(EM_DRIVE_INSERTION)) ||
1288                     (((__le32 *)aifcmd->data)[3]
1289                     == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
1290                   ADD : DELETE;
1291                 break;
1292             }
1293             break;
1294         case AifBuManagerEvent:
1295             aac_handle_aif_bu(dev, aifcmd);
1296             break;
1297         }
1298 
1299         /*
1300          *  If we are waiting on something and this happens to be
1301          * that thing then set the re-configure flag.
1302          */
1303         if (container != (u32)-1) {
1304             if (container >= dev->maximum_num_containers)
1305                 break;
1306             if ((dev->fsa_dev[container].config_waiting_on ==
1307                 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1308              time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1309                 dev->fsa_dev[container].config_waiting_on = 0;
1310         } else for (container = 0;
1311             container < dev->maximum_num_containers; ++container) {
1312             if ((dev->fsa_dev[container].config_waiting_on ==
1313                 le32_to_cpu(*(__le32 *)aifcmd->data)) &&
1314              time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
1315                 dev->fsa_dev[container].config_waiting_on = 0;
1316         }
1317         break;
1318 
1319     case AifCmdJobProgress:
1320         /*
1321          *  These are job progress AIF's. When a Clear is being
1322          * done on a container it is initially created then hidden from
1323          * the OS. When the clear completes we don't get a config
1324          * change so we monitor the job status complete on a clear then
1325          * wait for a container change.
1326          */
1327 
1328         if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1329             (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
1330              ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
1331             for (container = 0;
1332                 container < dev->maximum_num_containers;
1333                 ++container) {
1334                 /*
1335                  * Stomp on all config sequencing for all
1336                  * containers?
1337                  */
1338                 dev->fsa_dev[container].config_waiting_on =
1339                     AifEnContainerChange;
1340                 dev->fsa_dev[container].config_needed = ADD;
1341                 dev->fsa_dev[container].config_waiting_stamp =
1342                     jiffies;
1343             }
1344         }
1345         if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
1346             ((__le32 *)aifcmd->data)[6] == 0 &&
1347             ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
1348             for (container = 0;
1349                 container < dev->maximum_num_containers;
1350                 ++container) {
1351                 /*
1352                  * Stomp on all config sequencing for all
1353                  * containers?
1354                  */
1355                 dev->fsa_dev[container].config_waiting_on =
1356                     AifEnContainerChange;
1357                 dev->fsa_dev[container].config_needed = DELETE;
1358                 dev->fsa_dev[container].config_waiting_stamp =
1359                     jiffies;
1360             }
1361         }
1362         break;
1363     }
1364 
1365     container = 0;
1366 retry_next:
1367     if (device_config_needed == NOTHING) {
1368         for (; container < dev->maximum_num_containers; ++container) {
1369             if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1370                 (dev->fsa_dev[container].config_needed != NOTHING) &&
1371                 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1372                 device_config_needed =
1373                     dev->fsa_dev[container].config_needed;
1374                 dev->fsa_dev[container].config_needed = NOTHING;
1375                 channel = CONTAINER_TO_CHANNEL(container);
1376                 id = CONTAINER_TO_ID(container);
1377                 lun = CONTAINER_TO_LUN(container);
1378                 break;
1379             }
1380         }
1381     }
1382     if (device_config_needed == NOTHING)
1383         return;
1384 
1385     /*
1386      *  If we decided that a re-configuration needs to be done,
1387      * schedule it here on the way out the door, please close the door
1388      * behind you.
1389      */
1390 
1391     /*
1392      *  Find the scsi_device associated with the SCSI address,
1393      * and mark it as changed, invalidating the cache. This deals
1394      * with changes to existing device IDs.
1395      */
1396 
1397     if (!dev || !dev->scsi_host_ptr)
1398         return;
1399     /*
1400      * force reload of disk info via aac_probe_container
1401      */
1402     if ((channel == CONTAINER_CHANNEL) &&
1403       (device_config_needed != NOTHING)) {
1404         if (dev->fsa_dev[container].valid == 1)
1405             dev->fsa_dev[container].valid = 2;
1406         aac_probe_container(dev, container);
1407     }
1408     device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
1409     if (device) {
1410         switch (device_config_needed) {
1411         case DELETE:
1412 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1413             scsi_remove_device(device);
1414 #else
1415             if (scsi_device_online(device)) {
1416                 scsi_device_set_state(device, SDEV_OFFLINE);
1417                 sdev_printk(KERN_INFO, device,
1418                     "Device offlined - %s\n",
1419                     (channel == CONTAINER_CHANNEL) ?
1420                         "array deleted" :
1421                         "enclosure services event");
1422             }
1423 #endif
1424             break;
1425         case ADD:
1426             if (!scsi_device_online(device)) {
1427                 sdev_printk(KERN_INFO, device,
1428                     "Device online - %s\n",
1429                     (channel == CONTAINER_CHANNEL) ?
1430                         "array created" :
1431                         "enclosure services event");
1432                 scsi_device_set_state(device, SDEV_RUNNING);
1433             }
1434             fallthrough;
1435         case CHANGE:
1436             if ((channel == CONTAINER_CHANNEL)
1437              && (!dev->fsa_dev[container].valid)) {
1438 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1439                 scsi_remove_device(device);
1440 #else
1441                 if (!scsi_device_online(device))
1442                     break;
1443                 scsi_device_set_state(device, SDEV_OFFLINE);
1444                 sdev_printk(KERN_INFO, device,
1445                     "Device offlined - %s\n",
1446                     "array failed");
1447 #endif
1448                 break;
1449             }
1450             scsi_rescan_device(&device->sdev_gendev);
1451             break;
1452 
1453         default:
1454             break;
1455         }
1456         scsi_device_put(device);
1457         device_config_needed = NOTHING;
1458     }
1459     if (device_config_needed == ADD)
1460         scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
1461     if (channel == CONTAINER_CHANNEL) {
1462         container++;
1463         device_config_needed = NOTHING;
1464         goto retry_next;
1465     }
1466 }
1467 
1468 static void aac_schedule_bus_scan(struct aac_dev *aac)
1469 {
1470     if (aac->sa_firmware)
1471         aac_schedule_safw_scan_worker(aac);
1472     else
1473         aac_schedule_src_reinit_aif_worker(aac);
1474 }
1475 
1476 static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1477 {
1478     int index, quirks;
1479     int retval;
1480     struct Scsi_Host *host = aac->scsi_host_ptr;
1481     int jafo = 0;
1482     int bled;
1483     u64 dmamask;
1484     int num_of_fibs = 0;
1485 
1486     /*
1487      * Assumptions:
1488      *  - host is locked, unless called by the aacraid thread.
1489      *    (a matter of convenience, due to legacy issues surrounding
1490      *    eh_host_adapter_reset).
1491      *  - in_reset is asserted, so no new i/o is getting to the
1492      *    card.
1493      *  - The card is dead, or will be very shortly ;-/ so no new
1494      *    commands are completing in the interrupt service.
1495      */
1496     aac_adapter_disable_int(aac);
1497     if (aac->thread && aac->thread->pid != current->pid) {
1498         spin_unlock_irq(host->host_lock);
1499         kthread_stop(aac->thread);
1500         aac->thread = NULL;
1501         jafo = 1;
1502     }
1503 
1504     /*
1505      *  A positive health value means the adapter is in a known
1506      * DEAD PANIC state and could be reset to `try again'.
1507      */
1508     bled = forced ? 0 : aac_adapter_check_health(aac);
1509     retval = aac_adapter_restart(aac, bled, reset_type);
1510 
1511     if (retval)
1512         goto out;
1513 
1514     /*
1515      *  Loop through the fibs and close the synchronous FIBs
1516      */
1517     retval = 1;
1518     num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
1519     for (index = 0; index <  num_of_fibs; index++) {
1520 
1521         struct fib *fib = &aac->fibs[index];
1522         __le32 XferState = fib->hw_fib_va->header.XferState;
1523         bool is_response_expected = false;
1524 
1525         if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1526            (XferState & cpu_to_le32(ResponseExpected)))
1527             is_response_expected = true;
1528 
1529         if (is_response_expected
1530           || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
1531             unsigned long flagv;
1532             spin_lock_irqsave(&fib->event_lock, flagv);
1533             complete(&fib->event_wait);
1534             spin_unlock_irqrestore(&fib->event_lock, flagv);
1535             schedule();
1536             retval = 0;
1537         }
1538     }
1539     /* Give some extra time for ioctls to complete. */
1540     if (retval == 0)
1541         ssleep(2);
1542     index = aac->cardtype;
1543 
1544     /*
1545      * Re-initialize the adapter: first free resources, then carefully
1546      * apply the initialization sequence to come back again. The only
1547      * risk is the firmware dropping its cache; it is assumed the caller
1548      * will ensure that i/o is quiesced and the card is flushed in that
1549      * case.
1550      */
1551     aac_free_irq(aac);
1552     aac_fib_map_free(aac);
1553     dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
1554               aac->comm_phys);
1555     aac_adapter_ioremap(aac, 0);
1556     aac->comm_addr = NULL;
1557     aac->comm_phys = 0;
1558     kfree(aac->queues);
1559     aac->queues = NULL;
1560     kfree(aac->fsa_dev);
1561     aac->fsa_dev = NULL;
1562 
1563     dmamask = DMA_BIT_MASK(32);
1564     quirks = aac_get_driver_ident(index)->quirks;
1565     if (quirks & AAC_QUIRK_31BIT)
1566         retval = dma_set_mask(&aac->pdev->dev, dmamask);
1567     else if (!(quirks & AAC_QUIRK_SRC))
1568         retval = dma_set_mask(&aac->pdev->dev, dmamask);
1569     else
1570         retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
1571 
1572     if (quirks & AAC_QUIRK_31BIT && !retval) {
1573         dmamask = DMA_BIT_MASK(31);
1574         retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
1575     }
1576 
1577     if (retval)
1578         goto out;
1579 
1580     if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1581         goto out;
1582 
1583     if (jafo) {
1584         aac->thread = kthread_run(aac_command_thread, aac, "%s",
1585                       aac->name);
1586         if (IS_ERR(aac->thread)) {
1587             retval = PTR_ERR(aac->thread);
1588             aac->thread = NULL;
1589             goto out;
1590         }
1591     }
1592     (void)aac_get_adapter_info(aac);
1593     if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1594         host->sg_tablesize = 34;
1595         host->max_sectors = (host->sg_tablesize * 8) + 112;
1596     }
1597     if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1598         host->sg_tablesize = 17;
1599         host->max_sectors = (host->sg_tablesize * 8) + 112;
1600     }
1601     aac_get_config_status(aac, 1);
1602     aac_get_containers(aac);
1603     /*
1604      * This is where the assumption that the Adapter is quiesced
1605      * is important.
1606      */
1607     scsi_host_complete_all_commands(host, DID_RESET);
1608 
1609     retval = 0;
1610 out:
1611     aac->in_reset = 0;
1612 
1613     /*
1614      * Issue bus rescan to catch any configuration that might have
1615      * occurred
1616      */
1617     if (!retval && !is_kdump_kernel()) {
1618         dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
1619         aac_schedule_bus_scan(aac);
1620     }
1621 
1622     if (jafo) {
1623         spin_lock_irq(host->host_lock);
1624     }
1625     return retval;
1626 }
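
/*
 * The DMA mask selection above collapses to two outcomes: SRC-only
 * parts get just a 32-bit coherent mask, everything else gets a
 * 32-bit streaming mask, with 31-bit parts further restricted to a
 * 31-bit coherent mask. A minimal sketch of that decision, assuming
 * the same AAC_QUIRK_* flags (aac_sketch_set_dma_masks() is
 * hypothetical, not part of the driver):
 */
static int __maybe_unused aac_sketch_set_dma_masks(struct aac_dev *aac,
        int quirks)
{
    int ret;

    if ((quirks & AAC_QUIRK_SRC) && !(quirks & AAC_QUIRK_31BIT))
        return dma_set_coherent_mask(&aac->pdev->dev, DMA_BIT_MASK(32));

    ret = dma_set_mask(&aac->pdev->dev, DMA_BIT_MASK(32));
    if (!ret && (quirks & AAC_QUIRK_31BIT))
        ret = dma_set_coherent_mask(&aac->pdev->dev, DMA_BIT_MASK(31));
    return ret;
}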
1627 
1628 int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1629 {
1630     unsigned long flagv = 0;
1631     int retval, unblock_retval;
1632     struct Scsi_Host *host = aac->scsi_host_ptr;
1633     int bled;
1634 
1635     if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1636         return -EBUSY;
1637 
1638     if (aac->in_reset) {
1639         spin_unlock_irqrestore(&aac->fib_lock, flagv);
1640         return -EBUSY;
1641     }
1642     aac->in_reset = 1;
1643     spin_unlock_irqrestore(&aac->fib_lock, flagv);
1644 
1645     /*
1646      * Wait for all commands to complete to this specific
1647      * target (block maximum 60 seconds). Although not necessary,
1648      * it does make us a good storage citizen.
1649      */
1650     scsi_host_block(host);
1651 
1652     /* Quiesce build, flush cache, write through mode */
1653     if (forced < 2)
1654         aac_send_shutdown(aac);
1655     spin_lock_irqsave(host->host_lock, flagv);
1656     bled = forced ? forced :
1657             (aac_check_reset != 0 && aac_check_reset != 1);
1658     retval = _aac_reset_adapter(aac, bled, reset_type);
1659     spin_unlock_irqrestore(host->host_lock, flagv);
1660 
1661     unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
1662     if (!retval)
1663         retval = unblock_retval;
1664     if ((forced < 2) && (retval == -ENODEV)) {
1665         /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1666         struct fib * fibctx = aac_fib_alloc(aac);
1667         if (fibctx) {
1668             struct aac_pause *cmd;
1669             int status;
1670 
1671             aac_fib_init(fibctx);
1672 
1673             cmd = (struct aac_pause *) fib_data(fibctx);
1674 
1675             cmd->command = cpu_to_le32(VM_ContainerConfig);
1676             cmd->type = cpu_to_le32(CT_PAUSE_IO);
1677             cmd->timeout = cpu_to_le32(1);
1678             cmd->min = cpu_to_le32(1);
1679             cmd->noRescan = cpu_to_le32(1);
1680             cmd->count = cpu_to_le32(0);
1681 
1682             status = aac_fib_send(ContainerCommand,
1683               fibctx,
1684               sizeof(struct aac_pause),
1685               FsaNormal,
1686               -2 /* Timeout silently */, 1,
1687               NULL, NULL);
1688 
1689             if (status >= 0)
1690                 aac_fib_complete(fibctx);
1691             /* FIB should be freed only after getting
1692              * the response from the F/W */
1693             if (status != -ERESTARTSYS)
1694                 aac_fib_free(fibctx);
1695         }
1696     }
1697 
1698     return retval;
1699 }
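
/*
 * The CT_PAUSE_IO unwind above follows the synchronous FIB lifecycle
 * used throughout this file: allocate, init, fill, send, complete on
 * success, and free only once the firmware has given the FIB back
 * (-ERESTARTSYS means it still owns it). A condensed sketch of that
 * pattern; aac_sketch_send_pause() is hypothetical:
 */
static int __maybe_unused aac_sketch_send_pause(struct aac_dev *aac)
{
    struct fib *fibptr = aac_fib_alloc(aac);
    struct aac_pause *cmd;
    int status;

    if (!fibptr)
        return -ENOMEM;

    aac_fib_init(fibptr);
    cmd = (struct aac_pause *)fib_data(fibptr);
    cmd->command = cpu_to_le32(VM_ContainerConfig);
    cmd->type = cpu_to_le32(CT_PAUSE_IO);

    status = aac_fib_send(ContainerCommand, fibptr,
            sizeof(struct aac_pause), FsaNormal, 1, 1, NULL, NULL);
    if (status >= 0)
        aac_fib_complete(fibptr);   /* F/W answered */
    if (status != -ERESTARTSYS)
        aac_fib_free(fibptr);       /* safe to reclaim */
    return status;
}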
1700 
1701 int aac_check_health(struct aac_dev * aac)
1702 {
1703     int BlinkLED;
1704     unsigned long time_now, flagv = 0;
1705     struct list_head * entry;
1706 
1707     /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1708     if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1709         return 0;
1710 
1711     if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1712         spin_unlock_irqrestore(&aac->fib_lock, flagv);
1713         return 0; /* OK */
1714     }
1715 
1716     aac->in_reset = 1;
1717 
1718     /* Fake up an AIF:
1719      *  aac_aifcmd.command = AifCmdEventNotify = 1
1720      *  aac_aifcmd.seqnum = 0xFFFFFFFF
1721      *  aac_aifcmd.data[0] = AifEnExpEvent = 23
1722      *  aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1723      *  aac_aifcmd.data[2] = AifHighPriority = 3
1724      *  aac_aifcmd.data[3] = BlinkLED
1725      */
1726 
1727     time_now = jiffies/HZ;
1728     entry = aac->fib_list.next;
1729 
1730     /*
1731      * For each Context that is on the
1732      * fibctxList, make a copy of the
1733      * fib, and then set the event to wake up the
1734      * thread that is waiting for it.
1735      */
1736     while (entry != &aac->fib_list) {
1737         /*
1738          * Extract the fibctx
1739          */
1740         struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1741         struct hw_fib * hw_fib;
1742         struct fib * fib;
1743         /*
1744          * Check if the queue is getting
1745          * backlogged
1746          */
1747         if (fibctx->count > 20) {
1748             /*
1749              * It's *not* jiffies folks,
1750              * but jiffies / HZ, so do not
1751              * panic ...
1752              */
1753             u32 time_last = fibctx->jiffies;
1754             /*
1755              * Has it been > 2 minutes
1756              * since the last read off
1757              * the queue?
1758              */
1759             if ((time_now - time_last) > aif_timeout) {
1760                 entry = entry->next;
1761                 aac_close_fib_context(aac, fibctx);
1762                 continue;
1763             }
1764         }
1765         /*
1766          * Warning: no sleep allowed while
1767          * holding spinlock
1768          */
1769         hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1770         fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
1771         if (fib && hw_fib) {
1772             struct aac_aifcmd * aif;
1773 
1774             fib->hw_fib_va = hw_fib;
1775             fib->dev = aac;
1776             aac_fib_init(fib);
1777             fib->type = FSAFS_NTC_FIB_CONTEXT;
1778             fib->size = sizeof (struct fib);
1779             fib->data = hw_fib->data;
1780             aif = (struct aac_aifcmd *)hw_fib->data;
1781             aif->command = cpu_to_le32(AifCmdEventNotify);
1782             aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1783             ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
1784             ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
1785             ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
1786             ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
1787 
1788             /*
1789              * Put the FIB onto the
1790              * fibctx's fibs
1791              */
1792             list_add_tail(&fib->fiblink, &fibctx->fib_list);
1793             fibctx->count++;
1794             /*
1795              * Set the event to wake up the
1796              * thread that is waiting.
1797              */
1798             complete(&fibctx->completion);
1799         } else {
1800             printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1801             kfree(fib);
1802             kfree(hw_fib);
1803         }
1804         entry = entry->next;
1805     }
1806 
1807     spin_unlock_irqrestore(&aac->fib_lock, flagv);
1808 
1809     if (BlinkLED < 0) {
1810         printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1811                 aac->name, BlinkLED);
1812         goto out;
1813     }
1814 
1815     printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1816 
1817 out:
1818     aac->in_reset = 0;
1819     return BlinkLED;
1820 }
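
/*
 * Return convention for aac_check_health(), as relied on by callers:
 * 0 means healthy (or a reset already in flight), a negative value
 * means the adapter is dead, and a positive value is the firmware's
 * BlinkLED panic code. A hedged caller sketch
 * (aac_sketch_poll_health() is hypothetical):
 */
static void __maybe_unused aac_sketch_poll_health(struct aac_dev *aac)
{
    int blink = aac_check_health(aac);

    if (blink < 0)
        dev_err(&aac->pdev->dev, "adapter dead (%d)\n", blink);
    else if (blink > 0)
        dev_warn(&aac->pdev->dev, "firmware panic 0x%x\n", blink);
    /* blink == 0: healthy, nothing to do */
}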
1821 
1822 static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
1823 {
1824     return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
1825 }
1826 
1827 static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
1828                                 int bus,
1829                                 int target)
1830 {
1831     if (bus != CONTAINER_CHANNEL)
1832         bus = aac_phys_to_logical(bus);
1833 
1834     return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
1835 }
1836 
1837 static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
1838 {
1839     if (bus != CONTAINER_CHANNEL)
1840         bus = aac_phys_to_logical(bus);
1841 
1842     return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
1843 }
1844 
1845 static void aac_put_safw_scsi_device(struct scsi_device *sdev)
1846 {
1847     if (sdev)
1848         scsi_device_put(sdev);
1849 }
1850 
1851 static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
1852 {
1853     struct scsi_device *sdev;
1854 
1855     sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1856     scsi_remove_device(sdev);
1857     aac_put_safw_scsi_device(sdev);
1858 }
1859 
1860 static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
1861     int bus, int target)
1862 {
1863     return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
1864 }
1865 
1866 static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
1867 {
1868     if (is_safw_raid_volume(dev, bus, target))
1869         return dev->fsa_dev[target].valid;
1870     else
1871         return aac_is_safw_scan_count_equal(dev, bus, target);
1872 }
1873 
1874 static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
1875 {
1876     int is_exposed = 0;
1877     struct scsi_device *sdev;
1878 
1879     sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1880     if (sdev)
1881         is_exposed = 1;
1882     aac_put_safw_scsi_device(sdev);
1883 
1884     return is_exposed;
1885 }
1886 
1887 static int aac_update_safw_host_devices(struct aac_dev *dev)
1888 {
1889     int i;
1890     int bus;
1891     int target;
1892     int is_exposed = 0;
1893     int rcode = 0;
1894 
1895     rcode = aac_setup_safw_adapter(dev);
1896     if (unlikely(rcode < 0)) {
1897         goto out;
1898     }
1899 
1900     for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
1901 
1902         bus = get_bus_number(i);
1903         target = get_target_number(i);
1904 
1905         is_exposed = aac_is_safw_device_exposed(dev, bus, target);
1906 
1907         if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
1908             aac_add_safw_device(dev, bus, target);
1909         else if (!aac_is_safw_target_valid(dev, bus, target) &&
1910                                 is_exposed)
1911             aac_remove_safw_device(dev, bus, target);
1912     }
1913 out:
1914     return rcode;
1915 }
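
/*
 * The loop above reconciles firmware state with what the SCSI
 * midlayer currently exposes: add when valid but not exposed, remove
 * when exposed but no longer valid, and leave matching states alone.
 * A per-target sketch of that reconciliation
 * (aac_sketch_sync_target() is hypothetical):
 */
static void __maybe_unused aac_sketch_sync_target(struct aac_dev *dev,
        int bus, int target)
{
    int valid = aac_is_safw_target_valid(dev, bus, target);
    int exposed = aac_is_safw_device_exposed(dev, bus, target);

    if (valid && !exposed)
        aac_add_safw_device(dev, bus, target);    /* new device */
    else if (!valid && exposed)
        aac_remove_safw_device(dev, bus, target); /* stale device */
}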
1916 
1917 static int aac_scan_safw_host(struct aac_dev *dev)
1918 {
1919     int rcode = 0;
1920 
1921     rcode = aac_update_safw_host_devices(dev);
1922     if (rcode)
1923         aac_schedule_safw_scan_worker(dev);
1924 
1925     return rcode;
1926 }
1927 
1928 int aac_scan_host(struct aac_dev *dev)
1929 {
1930     int rcode = 0;
1931 
1932     mutex_lock(&dev->scan_mutex);
1933     if (dev->sa_firmware)
1934         rcode = aac_scan_safw_host(dev);
1935     else
1936         scsi_scan_host(dev->scsi_host_ptr);
1937     mutex_unlock(&dev->scan_mutex);
1938 
1939     return rcode;
1940 }
1941 
1942 void aac_src_reinit_aif_worker(struct work_struct *work)
1943 {
1944     struct aac_dev *dev = container_of(to_delayed_work(work),
1945                 struct aac_dev, src_reinit_aif_worker);
1946 
1947     wait_event(dev->scsi_host_ptr->host_wait,
1948             !scsi_host_in_recovery(dev->scsi_host_ptr));
1949     aac_reinit_aif(dev, dev->cardtype);
1950 }
1951 
1952 /**
1953  *  aac_handle_sa_aif - Handle a message from the firmware
1954  *  @dev: Adapter that this fib came from
1955  *  @fibptr: Pointer to the fib from the adapter
1956  *
1957  *  This routine handles a driver notify fib from the adapter and
1958  *  dispatches it to the appropriate routine for handling.
1959  */
1960 static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1961 {
1962     int i;
1963     u32 events = 0;
1964 
1965     if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1966         events = SA_AIF_HOTPLUG;
1967     else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
1968         events = SA_AIF_HARDWARE;
1969     else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
1970         events = SA_AIF_PDEV_CHANGE;
1971     else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
1972         events = SA_AIF_LDEV_CHANGE;
1973     else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
1974         events = SA_AIF_BPSTAT_CHANGE;
1975     else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
1976         events = SA_AIF_BPCFG_CHANGE;
1977 
1978     switch (events) {
1979     case SA_AIF_HOTPLUG:
1980     case SA_AIF_HARDWARE:
1981     case SA_AIF_PDEV_CHANGE:
1982     case SA_AIF_LDEV_CHANGE:
1983     case SA_AIF_BPCFG_CHANGE:
1984 
1985         aac_scan_host(dev);
1986 
1987         break;
1988 
1989     case SA_AIF_BPSTAT_CHANGE:
1990         /* currently do nothing */
1991         break;
1992     }
1993 
1994     for (i = 1; i <= 10; ++i) {
1995         events = src_readl(dev, MUnit.IDR);
1996         if (events & (1<<23)) {
1997             pr_warn("AIF not cleared by firmware - %d/%d\n",
1998                 i, 10);
1999             ssleep(1);
2000         }
2001     }
2002 }
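
/*
 * The if/else ladder above simply selects the highest-priority event
 * bit present in hbacmd_size. An equivalent table-driven sketch,
 * assuming the same SA_AIF_* masks (aac_sketch_pick_sa_aif_event()
 * is hypothetical, not a proposed change to the driver):
 */
static u32 __maybe_unused aac_sketch_pick_sa_aif_event(u32 bits)
{
    static const u32 prio[] = {
        SA_AIF_HOTPLUG, SA_AIF_HARDWARE, SA_AIF_PDEV_CHANGE,
        SA_AIF_LDEV_CHANGE, SA_AIF_BPSTAT_CHANGE, SA_AIF_BPCFG_CHANGE,
    };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(prio); i++)
        if (bits & prio[i])
            return prio[i];
    return 0;
}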
2003 
2004 static int get_fib_count(struct aac_dev *dev)
2005 {
2006     unsigned int num = 0;
2007     struct list_head *entry;
2008     unsigned long flagv;
2009 
2010     /*
2011      * Warning: no sleep allowed while
2012      * holding spinlock. We take the estimate
2013      * and pre-allocate a set of fibs outside the
2014      * lock.
2015      */
2016     num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
2017             / sizeof(struct hw_fib); /* some extra */
2018     spin_lock_irqsave(&dev->fib_lock, flagv);
2019     entry = dev->fib_list.next;
2020     while (entry != &dev->fib_list) {
2021         entry = entry->next;
2022         ++num;
2023     }
2024     spin_unlock_irqrestore(&dev->fib_lock, flagv);
2025 
2026     return num;
2027 }
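
/*
 * The open-coded walk above is equivalent to the list_for_each()
 * helper from <linux/list.h>. A sketch of the same count, assuming
 * the caller already holds fib_lock (aac_sketch_count_contexts() is
 * hypothetical):
 */
static unsigned int __maybe_unused aac_sketch_count_contexts(struct aac_dev *dev)
{
    struct list_head *entry;
    unsigned int num = 0;

    list_for_each(entry, &dev->fib_list)
        ++num;
    return num;
}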
2028 
2029 static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2030                         struct fib **fib_pool,
2031                         unsigned int num)
2032 {
2033     struct hw_fib **hw_fib_p;
2034     struct fib **fib_p;
2035 
2036     hw_fib_p = hw_fib_pool;
2037     fib_p = fib_pool;
2038     while (hw_fib_p < &hw_fib_pool[num]) {
2039         *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
2040         if (!(*(hw_fib_p++))) {
2041             --hw_fib_p;
2042             break;
2043         }
2044 
2045         *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
2046         if (!(*(fib_p++))) {
2047             kfree(*(--hw_fib_p));
2048             break;
2049         }
2050     }
2051 
2052     /*
2053      * Get the actual number of allocated fibs
2054      */
2055     num = hw_fib_p - hw_fib_pool;
2056     return num;
2057 }
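
/*
 * The pointer arithmetic in fillup_pools() can be hard to follow; an
 * indexed sketch of the same "allocate pairs, stop at the first
 * failure" logic (aac_sketch_fillup_pools() is hypothetical):
 */
static unsigned int __maybe_unused aac_sketch_fillup_pools(
        struct hw_fib **hw_fib_pool, struct fib **fib_pool,
        unsigned int num)
{
    unsigned int i;

    for (i = 0; i < num; i++) {
        hw_fib_pool[i] = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
        if (!hw_fib_pool[i])
            break;
        fib_pool[i] = kmalloc(sizeof(struct fib), GFP_KERNEL);
        if (!fib_pool[i]) {
            kfree(hw_fib_pool[i]);  /* drop the unpaired hw_fib */
            break;
        }
    }
    return i;   /* number of complete (hw_fib, fib) pairs */
}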
2058 
2059 static void wakeup_fibctx_threads(struct aac_dev *dev,
2060                         struct hw_fib **hw_fib_pool,
2061                         struct fib **fib_pool,
2062                         struct fib *fib,
2063                         struct hw_fib *hw_fib,
2064                         unsigned int num)
2065 {
2066     unsigned long flagv;
2067     struct list_head *entry;
2068     struct hw_fib **hw_fib_p;
2069     struct fib **fib_p;
2070     u32 time_now, time_last;
2071     struct hw_fib *hw_newfib;
2072     struct fib *newfib;
2073     struct aac_fib_context *fibctx;
2074 
2075     time_now = jiffies/HZ;
2076     spin_lock_irqsave(&dev->fib_lock, flagv);
2077     entry = dev->fib_list.next;
2078     /*
2079      * For each Context that is on the
2080      * fibctxList, make a copy of the
2081      * fib, and then set the event to wake up the
2082      * thread that is waiting for it.
2083      */
2084 
2085     hw_fib_p = hw_fib_pool;
2086     fib_p = fib_pool;
2087     while (entry != &dev->fib_list) {
2088         /*
2089          * Extract the fibctx
2090          */
2091         fibctx = list_entry(entry, struct aac_fib_context,
2092                 next);
2093         /*
2094          * Check if the queue is getting
2095          * backlogged
2096          */
2097         if (fibctx->count > 20) {
2098             /*
2099              * It's *not* jiffies folks,
2100              * but jiffies / HZ so do not
2101              * panic ...
2102              */
2103             time_last = fibctx->jiffies;
2104             /*
2105              * Has it been > 2 minutes
2106              * since the last read off
2107              * the queue?
2108              */
2109             if ((time_now - time_last) > aif_timeout) {
2110                 entry = entry->next;
2111                 aac_close_fib_context(dev, fibctx);
2112                 continue;
2113             }
2114         }
2115         /*
2116          * Warning: no sleep allowed while
2117          * holding spinlock
2118          */
2119         if (hw_fib_p >= &hw_fib_pool[num]) {
2120             pr_warn("aifd: didn't allocate NewFib\n");
2121             entry = entry->next;
2122             continue;
2123         }
2124 
2125         hw_newfib = *hw_fib_p;
2126         *(hw_fib_p++) = NULL;
2127         newfib = *fib_p;
2128         *(fib_p++) = NULL;
2129         /*
2130          * Make the copy of the FIB
2131          */
2132         memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
2133         memcpy(newfib, fib, sizeof(struct fib));
2134         newfib->hw_fib_va = hw_newfib;
2135         /*
2136          * Put the FIB onto the
2137          * fibctx's fibs
2138          */
2139         list_add_tail(&newfib->fiblink, &fibctx->fib_list);
2140         fibctx->count++;
2141         /*
2142          * Set the event to wake up the
2143          * thread that is waiting.
2144          */
2145         complete(&fibctx->completion);
2146 
2147         entry = entry->next;
2148     }
2149     /*
2150      *  Set the status of this FIB
2151      */
2152     *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2153     aac_fib_adapter_complete(fib, sizeof(u32));
2154     spin_unlock_irqrestore(&dev->fib_lock, flagv);
2155 
2156 }
2157 
2158 static void aac_process_events(struct aac_dev *dev)
2159 {
2160     struct hw_fib *hw_fib;
2161     struct fib *fib;
2162     unsigned long flags;
2163     spinlock_t *t_lock;
2164 
2165     t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2166     spin_lock_irqsave(t_lock, flags);
2167 
2168     while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
2169         struct list_head *entry;
2170         struct aac_aifcmd *aifcmd;
2171         unsigned int  num;
2172         struct hw_fib **hw_fib_pool, **hw_fib_p;
2173         struct fib **fib_pool, **fib_p;
2174 
2175         set_current_state(TASK_RUNNING);
2176 
2177         entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
2178         list_del(entry);
2179 
2180         t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2181         spin_unlock_irqrestore(t_lock, flags);
2182 
2183         fib = list_entry(entry, struct fib, fiblink);
2184         hw_fib = fib->hw_fib_va;
2185         if (dev->sa_firmware) {
2186             /* Thor AIF */
2187             aac_handle_sa_aif(dev, fib);
2188             aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2189             goto free_fib;
2190         }
2191         /*
2192          *  We will process the FIB here or pass it to a
2193      *  worker thread that is TBD. We really can't
2194          *  do anything at this point since we don't have
2195          *  anything defined for this thread to do.
2196          */
2197         memset(fib, 0, sizeof(struct fib));
2198         fib->type = FSAFS_NTC_FIB_CONTEXT;
2199         fib->size = sizeof(struct fib);
2200         fib->hw_fib_va = hw_fib;
2201         fib->data = hw_fib->data;
2202         fib->dev = dev;
2203         /*
2204          *  We only handle AifRequest fibs from the adapter.
2205          */
2206 
2207         aifcmd = (struct aac_aifcmd *) hw_fib->data;
2208         if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
2209             /* Handle Driver Notify Events */
2210             aac_handle_aif(dev, fib);
2211             *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
2212             aac_fib_adapter_complete(fib, (u16)sizeof(u32));
2213             goto free_fib;
2214         }
2215         /*
2216          * The u32 here is important and intended. We are using
2217          * 32-bit wrapping time to fit the adapter field.
2218          */
2219 
2220         /* Sniff events */
2221         if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
2222          || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
2223             aac_handle_aif(dev, fib);
2224         }
2225 
2226         /*
2227          * get number of fibs to process
2228          */
2229         num = get_fib_count(dev);
2230         if (!num)
2231             goto free_fib;
2232 
2233         hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
2234                         GFP_KERNEL);
2235         if (!hw_fib_pool)
2236             goto free_fib;
2237 
2238         fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
2239         if (!fib_pool)
2240             goto free_hw_fib_pool;
2241 
2242         /*
2243          * Fill up fib pointer pools with actual fibs
2244          * and hw_fibs
2245          */
2246         num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2247         if (!num)
2248             goto free_mem;
2249 
2250         /*
2251          * wakeup the thread that is waiting for
2252          * the response from fw (ioctl)
2253          */
2254         wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
2255                                 fib, hw_fib, num);
2256 
2257 free_mem:
2258         /* Free up the remaining resources */
2259         hw_fib_p = hw_fib_pool;
2260         fib_p = fib_pool;
2261         while (hw_fib_p < &hw_fib_pool[num]) {
2262             kfree(*hw_fib_p);
2263             kfree(*fib_p);
2264             ++fib_p;
2265             ++hw_fib_p;
2266         }
2267         kfree(fib_pool);
2268 free_hw_fib_pool:
2269         kfree(hw_fib_pool);
2270 free_fib:
2271         kfree(fib);
2272         t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2273         spin_lock_irqsave(t_lock, flags);
2274     }
2275     /*
2276      *  There are no more AIFs
2277      */
2278     t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2279     spin_unlock_irqrestore(t_lock, flags);
2280 }
2281 
2282 static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
2283                             u32 datasize)
2284 {
2285     struct aac_srb *srbcmd;
2286     struct sgmap64 *sg64;
2287     dma_addr_t addr;
2288     char *dma_buf;
2289     struct fib *fibptr;
2290     int ret = -ENOMEM;
2291     u32 vbus, vid;
2292 
2293     fibptr = aac_fib_alloc(dev);
2294     if (!fibptr)
2295         goto out;
2296 
2297     dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
2298                      GFP_KERNEL);
2299     if (!dma_buf)
2300         goto fib_free_out;
2301 
2302     aac_fib_init(fibptr);
2303 
2304     vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
2305     vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
2306 
2307     srbcmd = (struct aac_srb *)fib_data(fibptr);
2308 
2309     srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2310     srbcmd->channel = cpu_to_le32(vbus);
2311     srbcmd->id = cpu_to_le32(vid);
2312     srbcmd->lun = 0;
2313     srbcmd->flags = cpu_to_le32(SRB_DataOut);
2314     srbcmd->timeout = cpu_to_le32(10);
2315     srbcmd->retry_limit = 0;
2316     srbcmd->cdb_size = cpu_to_le32(12);
2317     srbcmd->count = cpu_to_le32(datasize);
2318 
2319     memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2320     srbcmd->cdb[0] = BMIC_OUT;
2321     srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
2322     memcpy(dma_buf, (char *)wellness_str, datasize);
2323 
2324     sg64 = (struct sgmap64 *)&srbcmd->sg;
2325     sg64->count = cpu_to_le32(1);
2326     sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2327     sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2328     sg64->sg[0].count = cpu_to_le32(datasize);
2329 
2330     ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
2331                 FsaNormal, 1, 1, NULL, NULL);
2332 
2333     dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
2334 
2335     /*
2336      * Do not set XferState to zero unless we
2337      * receive a response from the F/W
2338      */
2339     if (ret >= 0)
2340         aac_fib_complete(fibptr);
2341 
2342     /*
2343      * FIB should be freed only after
2344      * getting the response from the F/W
2345      */
2346     if (ret != -ERESTARTSYS)
2347         goto fib_free_out;
2348 
2349 out:
2350     return ret;
2351 fib_free_out:
2352     aac_fib_free(fibptr);
2353     goto out;
2354 }
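
/*
 * The sg64 setup above splits the 64-bit DMA address with two 16-bit
 * shifts to stay warning-free on 32-bit builds. The kernel's
 * upper_32_bits()/lower_32_bits() helpers express the same split; a
 * sketch (aac_sketch_fill_sg64() is hypothetical):
 */
static void __maybe_unused aac_sketch_fill_sg64(struct sgmap64 *sg64,
        dma_addr_t addr, u32 datasize)
{
    sg64->count = cpu_to_le32(1);
    sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
    sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
    sg64->sg[0].count = cpu_to_le32(datasize);
}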
2355 
2356 static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
2357 {
2358     struct tm cur_tm;
2359     char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2360     u32 datasize = sizeof(wellness_str);
2361     time64_t local_time;
2362     int ret = -ENODEV;
2363 
2364     if (!dev->sa_firmware)
2365         goto out;
2366 
2367     local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
2368     time64_to_tm(local_time, 0, &cur_tm);
2369     cur_tm.tm_mon += 1;
2370     cur_tm.tm_year += 1900;
2371     wellness_str[8] = bin2bcd(cur_tm.tm_hour);
2372     wellness_str[9] = bin2bcd(cur_tm.tm_min);
2373     wellness_str[10] = bin2bcd(cur_tm.tm_sec);
2374     wellness_str[12] = bin2bcd(cur_tm.tm_mon);
2375     wellness_str[13] = bin2bcd(cur_tm.tm_mday);
2376     wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
2377     wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
2378 
2379     ret = aac_send_wellness_command(dev, wellness_str, datasize);
2380 
2381 out:
2382     return ret;
2383 }
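
/*
 * Worked example of the BCD packing above: bin2bcd() stores one
 * decimal digit per nibble, so bin2bcd(59) == 0x59 and
 * bin2bcd(7) == 0x07. A timestamp of 14:59:07 on 2024-03-01 therefore
 * lands in the wellness string as the byte sequence
 * 0x14 0x59 0x07 ... 0x03 0x01 0x20 0x24.
 */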
2384 
2385 static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
2386 {
2387     int ret = -ENOMEM;
2388     struct fib *fibptr;
2389     __le32 *info;
2390 
2391     fibptr = aac_fib_alloc(dev);
2392     if (!fibptr)
2393         goto out;
2394 
2395     aac_fib_init(fibptr);
2396     info = (__le32 *)fib_data(fibptr);
2397     *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
2398     ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2399                     1, 1, NULL, NULL);
2400 
2401     /*
2402      * Do not set XferState to zero unless we
2403      * receive a response from the F/W
2404      */
2405     if (ret >= 0)
2406         aac_fib_complete(fibptr);
2407 
2408     /*
2409      * FIB should be freed only after
2410      * getting the response from the F/W
2411      */
2412     if (ret != -ERESTARTSYS)
2413         aac_fib_free(fibptr);
2414 
2415 out:
2416     return ret;
2417 }
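
/*
 * On the "overflow in y2106" note above: the adapter field holds
 * seconds since the epoch in 32 bits, and 2^32 s = 4294967296 s is
 * roughly 136 years, so the value wraps in early 2106.
 */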
2418 
2419 /**
2420  *  aac_command_thread  -   command processing thread
2421  *  @data: Adapter to monitor
2422  *
2423  *  Waits on the commandready event in its queue. When the event gets set
2424  *  it will pull FIBs off its queue. It will continue to pull FIBs off
2425  *  until the queue is empty. When the queue is empty it will wait for
2426  *  more FIBs.
2427  */
2428 
2429 int aac_command_thread(void *data)
2430 {
2431     struct aac_dev *dev = data;
2432     DECLARE_WAITQUEUE(wait, current);
2433     unsigned long next_jiffies = jiffies + HZ;
2434     unsigned long next_check_jiffies = next_jiffies;
2435     long difference = HZ;
2436 
2437     /*
2438      *  We can only have one thread per adapter for AIFs.
2439      */
2440     if (dev->aif_thread)
2441         return -EINVAL;
2442 
2443     /*
2444      *  Let the DPC know it has a place to send the AIFs to.
2445      */
2446     dev->aif_thread = 1;
2447     add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2448     set_current_state(TASK_INTERRUPTIBLE);
2449     dprintk ((KERN_INFO "aac_command_thread start\n"));
2450     while (1) {
2451 
2452         aac_process_events(dev);
2453 
2454         /*
2455          *  Background activity
2456          */
2457         if ((time_before(next_check_jiffies,next_jiffies))
2458          && ((difference = next_check_jiffies - jiffies) <= 0)) {
2459             next_check_jiffies = next_jiffies;
2460             if (aac_adapter_check_health(dev) == 0) {
2461                 difference = ((long)(unsigned)check_interval)
2462                        * HZ;
2463                 next_check_jiffies = jiffies + difference;
2464             } else if (!dev->queues)
2465                 break;
2466         }
2467         if (!time_before(next_check_jiffies,next_jiffies)
2468          && ((difference = next_jiffies - jiffies) <= 0)) {
2469             struct timespec64 now;
2470             int ret;
2471 
2472             /* Don't even try to talk to the adapter if it's sick */
2473             ret = aac_adapter_check_health(dev);
2474             if (ret || !dev->queues)
2475                 break;
2476             next_check_jiffies = jiffies
2477                        + ((long)(unsigned)check_interval)
2478                        * HZ;
2479             ktime_get_real_ts64(&now);
2480 
2481             /* Synchronize our watches */
2482             if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2483              && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2484                 difference = HZ + HZ / 2 -
2485                          now.tv_nsec / (NSEC_PER_SEC / HZ);
2486             else {
2487                 if (now.tv_nsec > NSEC_PER_SEC / 2)
2488                     ++now.tv_sec;
2489 
2490                 if (dev->sa_firmware)
2491                     ret =
2492                     aac_send_safw_hostttime(dev, &now);
2493                 else
2494                     ret = aac_send_hosttime(dev, &now);
2495 
2496                 difference = (long)(unsigned)update_interval*HZ;
2497             }
2498             next_jiffies = jiffies + difference;
2499             if (time_before(next_check_jiffies,next_jiffies))
2500                 difference = next_check_jiffies - jiffies;
2501         }
2502         if (difference <= 0)
2503             difference = 1;
2504         set_current_state(TASK_INTERRUPTIBLE);
2505 
2506         if (kthread_should_stop())
2507             break;
2508 
2509         /*
2510          * we probably want usleep_range() here instead of the
2511          * jiffies computation
2512          */
2513         schedule_timeout(difference);
2514 
2515         if (kthread_should_stop())
2516             break;
2517     }
2518     if (dev->queues)
2519         remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
2520     dev->aif_thread = 0;
2521     return 0;
2522 }
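
/*
 * Worked example of the "synchronize our watches" arithmetic in the
 * loop above, assuming HZ == 100: with now.tv_nsec at 200 ms, the
 * branch computes difference = HZ + HZ/2 - 200ms / (1s/HZ)
 * = 100 + 50 - 20 = 130 ticks (1.3 s), which parks the next wakeup
 * about half a second past the wall-clock second so the time sent to
 * the firmware rounds correctly.
 */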
2523 
2524 int aac_acquire_irq(struct aac_dev *dev)
2525 {
2526     int i;
2527     int j;
2528     int ret = 0;
2529 
2530     if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
2531         for (i = 0; i < dev->max_msix; i++) {
2532             dev->aac_msix[i].vector_no = i;
2533             dev->aac_msix[i].dev = dev;
2534             if (request_irq(pci_irq_vector(dev->pdev, i),
2535                     dev->a_ops.adapter_intr,
2536                     0, "aacraid", &(dev->aac_msix[i]))) {
2537                 printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
2538                         dev->name, dev->id, i);
2539                 for (j = 0 ; j < i ; j++)
2540                     free_irq(pci_irq_vector(dev->pdev, j),
2541                          &(dev->aac_msix[j]));
2542                 pci_disable_msix(dev->pdev);
2543                 ret = -1;
                     break;  /* MSI-X is torn down; stop requesting vectors */
2544             }
2545         }
2546     } else {
2547         dev->aac_msix[0].vector_no = 0;
2548         dev->aac_msix[0].dev = dev;
2549 
2550         if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
2551             IRQF_SHARED, "aacraid",
2552             &(dev->aac_msix[0])) < 0) {
2553             if (dev->msi)
2554                 pci_disable_msi(dev->pdev);
2555             printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
2556                     dev->name, dev->id);
2557             ret = -1;
2558         }
2559     }
2560     return ret;
2561 }
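
/*
 * The unwind in aac_acquire_irq() follows the usual "free what was
 * requested so far" idiom for per-vector IRQs. A condensed sketch of
 * that pattern (aac_sketch_request_vectors() is hypothetical):
 */
static int __maybe_unused aac_sketch_request_vectors(struct aac_dev *dev)
{
    int i, j;

    for (i = 0; i < dev->max_msix; i++) {
        if (request_irq(pci_irq_vector(dev->pdev, i),
                dev->a_ops.adapter_intr, 0, "aacraid",
                &dev->aac_msix[i])) {
            for (j = 0; j < i; j++)   /* unwind earlier vectors */
                free_irq(pci_irq_vector(dev->pdev, j),
                     &dev->aac_msix[j]);
            return -1;
        }
    }
    return 0;
}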
2562 
2563 void aac_free_irq(struct aac_dev *dev)
2564 {
2565     int i;
2566 
2567     if (aac_is_src(dev)) {
2568         if (dev->max_msix > 1) {
2569             for (i = 0; i < dev->max_msix; i++)
2570                 free_irq(pci_irq_vector(dev->pdev, i),
2571                      &(dev->aac_msix[i]));
2572         } else {
2573             free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
2574         }
2575     } else {
2576         free_irq(dev->pdev->irq, dev);
2577     }
2578     if (dev->msi)
2579         pci_disable_msi(dev->pdev);
2580     else if (dev->max_msix > 1)
2581         pci_disable_msix(dev->pdev);
2582 }