0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  Adaptec AAC series RAID controller driver
0004  *  (c) Copyright 2001 Red Hat Inc.
0005  *
0006  * based on the old aacraid driver that is..
0007  * Adaptec aacraid device driver for Linux.
0008  *
0009  * Copyright (c) 2000-2010 Adaptec, Inc.
0010  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
0011  *       2016-2017 Microsemi Corp. (aacraid@microsemi.com)
0012  *
0013  * Module Name:
0014  *  dpcsup.c
0015  *
0016  * Abstract: All DPC processing routines for the cyclone board occur here.
0017  */
0018 
0019 #include <linux/kernel.h>
0020 #include <linux/init.h>
0021 #include <linux/types.h>
0022 #include <linux/spinlock.h>
0023 #include <linux/slab.h>
0024 #include <linux/completion.h>
0025 #include <linux/blkdev.h>
0026 
0027 #include "aacraid.h"
0028 
/**
 *  aac_response_normal -   Handle command replies
 *  @q: Queue to read from
 *
 *  This DPC routine will be run when the adapter interrupts us to let us
 *  know there is a response on our normal priority queue. We will pull off
 *  all QE there are and wake up all the waiters before exiting. We will
 *  take a spinlock out on the queue before operating on it.
 *
 *  Return: always 0.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
    struct aac_dev * dev = q->dev;
    struct aac_entry *entry;
    struct hw_fib * hwfib;
    struct fib * fib;
    int consumed = 0;
    unsigned long flags, mflags;

    spin_lock_irqsave(q->lock, flags);
    /*
     *  Keep pulling response QEs off the response queue and waking
     *  up the waiters until there are no more QEs. We then return
     *  back to the system. If no response was requested we just
     *  deallocate the Fib here and continue.
     */
    while(aac_consumer_get(dev, q, &entry))
    {
        int fast;
        /*
         * entry->addr encodes both the fib index and per-entry flags:
         * bit 0 marks a "fast response" (adapter did not copy status
         * back into the fib, so we fake ST_OK below); the fib index
         * lives in bits 2 and up.  (Assumes bit 1 is unused here —
         * NOTE(review): confirm against the firmware interface spec.)
         */
        u32 index = le32_to_cpu(entry->addr);
        fast = index & 0x01;
        fib = &dev->fibs[index >> 2];
        hwfib = fib->hw_fib_va;
        
        aac_consumer_free(dev, q, HostNormRespQueue);
        /*
         *  Remove this fib from the Outstanding I/O queue.
         *  But only if it has not already been timed out.
         *
         *  If the fib has been timed out already, then just 
         *  continue. The caller has already been notified that
         *  the fib timed out.
         */
        atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

        if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
            /*
             * Drop the queue lock around fib teardown — presumably
             * aac_fib_complete()/aac_fib_free() take other locks or
             * may not be called under q->lock (TODO confirm).
             */
            spin_unlock_irqrestore(q->lock, flags);
            aac_fib_complete(fib);
            aac_fib_free(fib);
            spin_lock_irqsave(q->lock, flags);
            continue;
        }
        /* Queue lock is released while the fib is processed below. */
        spin_unlock_irqrestore(q->lock, flags);

        if (fast) {
            /*
             *  Doctor the fib: synthesize the ST_OK status the
             *  adapter omitted and mark it as processed.
             */
            *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
            hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
            fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
        }

        FIB_COUNTER_INCREMENT(aac_config.FibRecved);

        /*
         * NuFileSystem commands report extra information in the upper
         * 16 bits of the status word; callers only understand plain
         * ST_OK, so squash any such status down to ST_OK.
         */
        if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
        {
            __le32 *pstatus = (__le32 *)hwfib->data;
            if (*pstatus & cpu_to_le32(0xffff0000))
                *pstatus = cpu_to_le32(ST_OK);
        }
        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) 
        {
            if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
            } else {
                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
            }
            /*
             *  NOTE:  we cannot touch the fib after this
             *      call, because it may have been deallocated.
             */
            fib->callback(fib->callback_data, fib);
        } else {
            /* Synchronous fib: wake the sleeping issuer. */
            unsigned long flagv;
            spin_lock_irqsave(&fib->event_lock, flagv);
            if (!fib->done) {
                fib->done = 1;
                complete(&fib->event_wait);
            }
            spin_unlock_irqrestore(&fib->event_lock, flagv);

            spin_lock_irqsave(&dev->manage_lock, mflags);
            dev->management_fib_count--;
            spin_unlock_irqrestore(&dev->manage_lock, mflags);

            FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
            /*
             * done == 2 presumably means the waiter already gave up
             * (timed out) and handed ownership back to us, so the
             * fib must be torn down here — TODO confirm against the
             * waiter side in commsup.c.
             */
            if (fib->done == 2) {
                spin_lock_irqsave(&fib->event_lock, flagv);
                fib->done = 0;
                spin_unlock_irqrestore(&fib->event_lock, flagv);
                aac_fib_complete(fib);
                aac_fib_free(fib);
            }
        }
        consumed++;
        /* Re-take the queue lock for the next aac_consumer_get(). */
        spin_lock_irqsave(q->lock, flags);
    }

    /* Bookkeeping: track the largest batch and empty interrupts. */
    if (consumed > aac_config.peak_fibs)
        aac_config.peak_fibs = consumed;
    if (consumed == 0) 
        aac_config.zero_fibs++;

    spin_unlock_irqrestore(q->lock, flags);
    return 0;
}
0146 
0147 
/**
 *  aac_command_normal  -   handle commands
 *  @q: queue to process
 *
 *  This DPC routine will be queued when the adapter interrupts us to 
 *  let us know there is a command on our normal priority queue. We will 
 *  pull off all QE there are and wake up all the waiters before exiting.
 *  We will take a spinlock out on the queue before operating on it.
 *
 *  Return: always 0.
 */
 
unsigned int aac_command_normal(struct aac_queue *q)
{
    struct aac_dev * dev = q->dev;
    struct aac_entry *entry;
    unsigned long flags;

    spin_lock_irqsave(q->lock, flags);

    /*
     *  Keep pulling response QEs off the response queue and waking
     *  up the waiters until there are no more QEs. We then return
     *  back to the system.
     */
    while(aac_consumer_get(dev, q, &entry))
    {
        struct fib fibctx;      /* stack fallback when kmalloc fails */
        struct hw_fib * hw_fib;
        u32 index;
        struct fib *fib = &fibctx;
        
        /* entry->addr is a byte offset into the AIF fib area. */
        index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
        hw_fib = &dev->aif_base_va[index];
        
        /*
         *  Allocate a FIB at all costs. For non queued stuff
         *  we can just use the stack so we are happy. We need
         *  a fib object in order to manage the linked lists
         */
        if (dev->aif_thread)
            if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                fib = &fibctx;
        
        /* Wire the fib wrapper up to the hardware fib it describes. */
        memset(fib, 0, sizeof(struct fib));
        INIT_LIST_HEAD(&fib->fiblink);
        fib->type = FSAFS_NTC_FIB_CONTEXT;
        fib->size = sizeof(struct fib);
        fib->hw_fib_va = hw_fib;
        fib->data = hw_fib->data;
        fib->dev = dev;
        
                
        if (dev->aif_thread && fib != &fibctx) {
                /* Heap fib: queue it for the AIF thread to process. */
                list_add_tail(&fib->fiblink, &q->cmdq);
                aac_consumer_free(dev, q, HostNormCmdQueue);
                wake_up_interruptible(&q->cmdready);
        } else {
                aac_consumer_free(dev, q, HostNormCmdQueue);
            /* Stack fib: complete inline; drop the lock while replying. */
            spin_unlock_irqrestore(q->lock, flags);
            /*
             *  Set the status of this FIB
             */
            *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
            aac_fib_adapter_complete(fib, sizeof(u32));
            spin_lock_irqsave(q->lock, flags);
        }       
    }
    spin_unlock_irqrestore(q->lock, flags);
    return 0;
}
0217 
/*
 *
 * aac_aif_callback
 * @context: callback context set when the AIF request was sent — this is
 *           the request fib itself (cast back to struct fib *), not a
 *           scsi cmd as a stale comment once claimed
 * @fibptr: pointer to the completed fib
 *
 * Handles the AIFs - new method (SRC)
 *
 * Forwards the received AIF to the common handler and immediately
 * re-arms the fib with a fresh AifReqEvent request so the adapter
 * always has one outstanding AIF request.
 *
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
    struct fib *fibctx;
    struct aac_dev *dev;
    struct aac_aifcmd *cmd;

    fibctx = (struct fib *)context;
    BUG_ON(fibptr == NULL);
    dev = fibptr->dev;

    /*
     * Stop re-arming when the adapter says there is no more AIF data,
     * or on sa_firmware controllers (which presumably deliver AIFs by
     * another path — TODO confirm).
     */
    if ((fibptr->hw_fib_va->header.XferState &
        cpu_to_le32(NoMoreAifDataAvailable)) ||
        dev->sa_firmware) {
        aac_fib_complete(fibptr);
        aac_fib_free(fibptr);
        return;
    }

    /* Hand the AIF payload to the common path (isAif == 1). */
    aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

    /* Re-init the fib and send the next AifReqEvent, with ourselves
     * as the callback, so AIF delivery keeps cycling. */
    aac_fib_init(fibctx);
    cmd = (struct aac_aifcmd *) fib_data(fibctx);
    cmd->command = cpu_to_le32(AifReqEvent);

    aac_fib_send(AifRequest,
        fibctx,
        sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
        FsaNormal,
        0, 1,
        (fib_callback)aac_aif_callback, fibctx);
}
0259 
0260 
/*
 *  aac_intr_normal -   Handle command replies
 *  @dev: Device
 *  @index: completion reference — meaning depends on mode: event type
 *          (sa_firmware AIF), byte offset into adapter registers
 *          (legacy AIF without @aif_fib), or fib table index (response)
 *  @isAif: 0 = normal response completion, 1 = common/legacy AIF,
 *          2 = new (SRC) AIF
 *  @isFastResponse: nonzero if the adapter completed without copying
 *          status back (we synthesize ST_OK)
 *  @aif_fib: optional pre-copied AIF hw_fib (legacy AIF path)
 *
 *  This DPC routine will be run when the adapter interrupts us to let us
 *  know there is a response on our normal priority queue. We will pull off
 *  all QE there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
    int isFastResponse, struct hw_fib *aif_fib)
{
    unsigned long mflags;
    dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
    if (isAif == 1) {   /* AIF - common */
        struct hw_fib * hw_fib;
        struct fib * fib;
        struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
        unsigned long flags;

        /*
         *  Allocate a FIB. For non queued stuff we can just use
         * the stack so we are happy. We need a fib object in order to
         * manage the linked lists.
         */
        if ((!dev->aif_thread)
         || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
            return 1;
        if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
            kfree (fib);
            return 1;
        }
        /*
         * Fill the private hw_fib copy: sa_firmware passes only an
         * event type in @index; otherwise copy from the supplied
         * @aif_fib or straight out of the adapter's register window.
         */
        if (dev->sa_firmware) {
            fib->hbacmd_size = index;   /* store event type */
        } else if (aif_fib != NULL) {
            memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
        } else {
            memcpy(hw_fib, (struct hw_fib *)
                (((uintptr_t)(dev->regs.sa)) + index),
                sizeof(struct hw_fib));
        }
        INIT_LIST_HEAD(&fib->fiblink);
        fib->type = FSAFS_NTC_FIB_CONTEXT;
        fib->size = sizeof(struct fib);
        fib->hw_fib_va = hw_fib;
        fib->data = hw_fib->data;
        fib->dev = dev;
    
        /* Queue for the AIF thread and wake it. */
        spin_lock_irqsave(q->lock, flags);
        list_add_tail(&fib->fiblink, &q->cmdq);
            wake_up_interruptible(&q->cmdready);
        spin_unlock_irqrestore(q->lock, flags);
        return 1;
    } else if (isAif == 2) {    /* AIF - new (SRC) */
        struct fib *fibctx;
        struct aac_aifcmd *cmd;

        /* Prime the self-re-arming AIF request cycle (see
         * aac_aif_callback, which resubmits on each completion). */
        fibctx = aac_fib_alloc(dev);
        if (!fibctx)
            return 1;
        aac_fib_init(fibctx);

        cmd = (struct aac_aifcmd *) fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        return aac_fib_send(AifRequest,
            fibctx,
            sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
            FsaNormal,
            0, 1,
            (fib_callback)aac_aif_callback, fibctx);
    } else {
        /* Normal response completion: @index is a fib table index. */
        struct fib *fib = &dev->fibs[index];
        int start_callback = 0;

        /*
         *  Remove this fib from the Outstanding I/O queue.
         *  But only if it has not already been timed out.
         *
         *  If the fib has been timed out already, then just 
         *  continue. The caller has already been notified that
         *  the fib timed out.
         */
        atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

        if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
            aac_fib_complete(fib);
            aac_fib_free(fib);
            return 0;
        }

        FIB_COUNTER_INCREMENT(aac_config.FibRecved);

        if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
            /* Native HBA path: no fib "doctoring" needed. */

            if (isFastResponse)
                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

            if (fib->callback) {
                start_callback = 1;
            } else {
                /* Synchronous: wake the waiter, or finish the fib
                 * ourselves if the waiter already timed out. */
                unsigned long flagv;
                int completed = 0;

                dprintk((KERN_INFO "event_wait up\n"));
                spin_lock_irqsave(&fib->event_lock, flagv);
                /*
                 * done == 2: waiter gave up and handed ownership to
                 * us — complete the fib here instead of signalling.
                 */
                if (fib->done == 2) {
                    fib->done = 1;
                    completed = 1;
                } else {
                    fib->done = 1;
                    complete(&fib->event_wait);
                }
                spin_unlock_irqrestore(&fib->event_lock, flagv);

                spin_lock_irqsave(&dev->manage_lock, mflags);
                dev->management_fib_count--;
                spin_unlock_irqrestore(&dev->manage_lock,
                    mflags);

                FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
                if (completed)
                    aac_fib_complete(fib);
            }
        } else {
            struct hw_fib *hwfib = fib->hw_fib_va;

            if (isFastResponse) {
                /* Doctor the fib: synthesize the ST_OK status the
                 * adapter omitted and mark it processed. */
                *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                hwfib->header.XferState |=
                    cpu_to_le32(AdapterProcessed);
                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
            }

            /* NuFileSystem status carries extra info in the upper 16
             * bits; callers expect plain ST_OK, so squash it. */
            if (hwfib->header.Command ==
                cpu_to_le16(NuFileSystem)) {
                __le32 *pstatus = (__le32 *)hwfib->data;

                if (*pstatus & cpu_to_le32(0xffff0000))
                    *pstatus = cpu_to_le32(ST_OK);
            }
            if (hwfib->header.XferState &
                cpu_to_le32(NoResponseExpected | Async)) {
                if (hwfib->header.XferState & cpu_to_le32(
                    NoResponseExpected)) {
                    FIB_COUNTER_INCREMENT(
                        aac_config.NoResponseRecved);
                } else {
                    FIB_COUNTER_INCREMENT(
                        aac_config.AsyncRecved);
                }
                start_callback = 1;
            } else {
                /* Synchronous fib: same waiter handshake as the
                 * native HBA branch above. */
                unsigned long flagv;
                int completed = 0;

                dprintk((KERN_INFO "event_wait up\n"));
                spin_lock_irqsave(&fib->event_lock, flagv);
                if (fib->done == 2) {
                    fib->done = 1;
                    completed = 1;
                } else {
                    fib->done = 1;
                    complete(&fib->event_wait);
                }
                spin_unlock_irqrestore(&fib->event_lock, flagv);

                spin_lock_irqsave(&dev->manage_lock, mflags);
                dev->management_fib_count--;
                spin_unlock_irqrestore(&dev->manage_lock,
                    mflags);

                FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                if (completed)
                    aac_fib_complete(fib);
            }
        }


        if (start_callback) {
            /*
             * NOTE:  we cannot touch the fib after this
             *  call, because it may have been deallocated.
             */
            if (likely(fib->callback && fib->callback_data)) {
                fib->callback(fib->callback_data, fib);
            } else {
                /* No usable callback: tear the fib down here. */
                aac_fib_complete(fib);
                aac_fib_free(fib);
            }

        }
        return 0;
    }
}