0001 /*
0002  *  linux/drivers/scsi/esas2r/esas2r_int.c
0003  *      esas2r interrupt handling
0004  *
0005  *  Copyright (c) 2001-2013 ATTO Technology, Inc.
0006  *  (mailto:linuxdrivers@attotech.com)
0007  */
0008 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0009 /*
0010  *  This program is free software; you can redistribute it and/or modify
0011  *  it under the terms of the GNU General Public License as published by
0012  *  the Free Software Foundation; version 2 of the License.
0013  *
0014  *  This program is distributed in the hope that it will be useful,
0015  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
0016  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0017  *  GNU General Public License for more details.
0018  *
0019  *  NO WARRANTY
0020  *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
0021  *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
0022  *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
0023  *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
0024  *  solely responsible for determining the appropriateness of using and
0025  *  distributing the Program and assumes all risks associated with its
0026  *  exercise of rights under this Agreement, including but not limited to
0027  *  the risks and costs of program errors, damage to or loss of data,
0028  *  programs or equipment, and unavailability or interruption of operations.
0029  *
0030  *  DISCLAIMER OF LIABILITY
0031  *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
0032  *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
0033  *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
0034  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
0035  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
0036  *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
0037  *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
0038  *
0039  *  You should have received a copy of the GNU General Public License
0040  *  along with this program; if not, write to the Free Software
0041  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
0042  */
0043 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
0044 
0045 #include "esas2r.h"
0046 
0047 /* Local function prototypes */
0048 static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
0049 static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
0050 static void esas2r_process_bus_reset(struct esas2r_adapter *a);
0051 
0052 /*
0053  * Poll the adapter for interrupts and service them.
0054  * This function handles both legacy interrupts and MSI.
0055  */
0056 void esas2r_polled_interrupt(struct esas2r_adapter *a)
0057 {
0058     u32 intstat;
0059     u32 doorbell;
0060 
0061     esas2r_disable_chip_interrupts(a);
0062 
0063     intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
0064 
0065     if (intstat & MU_INTSTAT_POST_OUT) {
0066         /* clear the interrupt */
0067 
0068         esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
0069                         MU_OLIS_INT);
0070         esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
0071 
0072         esas2r_get_outbound_responses(a);
0073     }
0074 
0075     if (intstat & MU_INTSTAT_DRBL) {
0076         doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
0077         if (doorbell != 0)
0078             esas2r_doorbell_interrupt(a, doorbell);
0079     }
0080 
0081     esas2r_enable_chip_interrupts(a);
0082 
0083     if (atomic_read(&a->disable_cnt) == 0)
0084         esas2r_do_deferred_processes(a);
0085 }
0086 
0087 /*
0088  * Legacy and MSI interrupt handlers.  Note that the legacy interrupt handler
0089  * schedules a tasklet to process events, whereas the MSI handler just
0090  * processes interrupt events directly.
0091  */
0092 irqreturn_t esas2r_interrupt(int irq, void *dev_id)
0093 {
0094     struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
0095 
0096     if (!esas2r_adapter_interrupt_pending(a))
0097         return IRQ_NONE;
0098 
0099     set_bit(AF2_INT_PENDING, &a->flags2);
0100     esas2r_schedule_tasklet(a);
0101 
0102     return IRQ_HANDLED;
0103 }
0104 
0105 void esas2r_adapter_interrupt(struct esas2r_adapter *a)
0106 {
0107     u32 doorbell;
0108 
0109     if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
0110         /* clear the interrupt */
0111         esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
0112                         MU_OLIS_INT);
0113         esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
0114         esas2r_get_outbound_responses(a);
0115     }
0116 
0117     if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
0118         doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
0119         if (doorbell != 0)
0120             esas2r_doorbell_interrupt(a, doorbell);
0121     }
0122 
0123     a->int_mask = ESAS2R_INT_STS_MASK;
0124 
0125     esas2r_enable_chip_interrupts(a);
0126 
0127     if (likely(atomic_read(&a->disable_cnt) == 0))
0128         esas2r_do_deferred_processes(a);
0129 }
0130 
0131 irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
0132 {
0133     struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
0134     u32 intstat;
0135     u32 doorbell;
0136 
0137     intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
0138 
0139     if (likely(intstat & MU_INTSTAT_POST_OUT)) {
0140         /* clear the interrupt */
0141 
0142         esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
0143                         MU_OLIS_INT);
0144         esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
0145 
0146         esas2r_get_outbound_responses(a);
0147     }
0148 
0149     if (unlikely(intstat & MU_INTSTAT_DRBL)) {
0150         doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
0151         if (doorbell != 0)
0152             esas2r_doorbell_interrupt(a, doorbell);
0153     }
0154 
0155     /*
0156      * Work around a chip bug and force a new MSI to be sent if one is
0157      * still pending.
0158      */
0159     esas2r_disable_chip_interrupts(a);
0160     esas2r_enable_chip_interrupts(a);
0161 
0162     if (likely(atomic_read(&a->disable_cnt) == 0))
0163         esas2r_do_deferred_processes(a);
0164 
0165     esas2r_do_tasklet_tasks(a);
0166 
0167     return IRQ_HANDLED;
0168 }
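
/*
 * Illustrative sketch only: one way the two handlers above could be
 * hooked up with request_irq().  This helper is hypothetical and not
 * part of the driver; the real IRQ claiming happens in the driver's
 * initialization path, and the use_msi parameter and "esas2r" name
 * string are assumptions made for the example.
 */
static int esas2r_claim_irq_sketch(struct esas2r_adapter *a, int irq,
                   bool use_msi)
{
    /* MSI gets the direct handler; a legacy INTx line may be shared. */
    irq_handler_t handler = use_msi ? esas2r_msi_interrupt
                    : esas2r_interrupt;

    return request_irq(irq, handler, use_msi ? 0 : IRQF_SHARED,
               "esas2r", a);
}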
0169 
0170 
0171 
0172 static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
0173                        struct esas2r_request *rq,
0174                        struct atto_vda_ob_rsp *rsp)
0175 {
0176 
0177     /*
0178      * For I/O requests, only copy the response if an error
0179      * occurred, and set up a callback to do error processing.
0180      */
0181     if (unlikely(rq->req_stat != RS_SUCCESS)) {
0182         memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
0183 
0184         if (rq->req_stat == RS_ABORTED) {
0185             if (rq->timeout > RQ_MAX_TIMEOUT)
0186                 rq->req_stat = RS_TIMEOUT;
0187         } else if (rq->req_stat == RS_SCSI_ERROR) {
0188             u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
0189 
0190             esas2r_trace("scsistatus: %x", scsistatus);
0191 
0192             /* Any of these are a good result. */
0193             if (scsistatus == SAM_STAT_GOOD || scsistatus ==
0194                 SAM_STAT_CONDITION_MET || scsistatus ==
0195                 SAM_STAT_INTERMEDIATE || scsistatus ==
0196                 SAM_STAT_INTERMEDIATE_CONDITION_MET) {
0197                 rq->req_stat = RS_SUCCESS;
0198                 rq->func_rsp.scsi_rsp.scsi_stat =
0199                     SAM_STAT_GOOD;
0200             }
0201         }
0202     }
0203 }
0204 
0205 static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
0206 {
0207     struct atto_vda_ob_rsp *rsp;
0208     u32 rspput_ptr;
0209     u32 rspget_ptr;
0210     struct esas2r_request *rq;
0211     u32 handle;
0212     unsigned long flags;
0213 
0214     LIST_HEAD(comp_list);
0215 
0216     esas2r_trace_enter();
0217 
0218     spin_lock_irqsave(&a->queue_lock, flags);
0219 
0220     /* Get the outbound limit and pointers */
0221     rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
0222     rspget_ptr = a->last_read;
0223 
0224     esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
0225 
0226     /* If we don't have anything to process, get out */
0227     if (unlikely(rspget_ptr == rspput_ptr)) {
0228         spin_unlock_irqrestore(&a->queue_lock, flags);
0229         esas2r_trace_exit();
0230         return;
0231     }
0232 
0233     /* Make sure the firmware is healthy */
0234     if (unlikely(rspput_ptr >= a->list_size)) {
0235         spin_unlock_irqrestore(&a->queue_lock, flags);
0236         esas2r_bugon();
0237         esas2r_local_reset_adapter(a);
0238         esas2r_trace_exit();
0239         return;
0240     }
0241 
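    /*
     * Walk the outbound list from the entry after last_read up to the
     * firmware's write pointer, completing each response found.
     */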
0242     do {
0243         rspget_ptr++;
0244 
0245         if (rspget_ptr >= a->list_size)
0246             rspget_ptr = 0;
0247 
0248         rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
0249               + rspget_ptr;
0250 
0251         handle = rsp->handle;
0252 
0253         /* Verify the handle range */
0254         if (unlikely(LOWORD(handle) == 0
0255                  || LOWORD(handle) > num_requests +
0256                  num_ae_requests + 1)) {
0257             esas2r_bugon();
0258             continue;
0259         }
0260 
0261         /* Get the request for this handle */
0262         rq = a->req_table[LOWORD(handle)];
0263 
0264         if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
0265             esas2r_bugon();
0266             continue;
0267         }
0268 
0269         list_del(&rq->req_list);
0270 
0271         /* Get the completion status */
0272         rq->req_stat = rsp->req_stat;
0273 
0274         esas2r_trace("handle: %x", handle);
0275         esas2r_trace("rq: %p", rq);
0276         esas2r_trace("req_status: %x", rq->req_stat);
0277 
0278         if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
0279             esas2r_handle_outbound_rsp_err(a, rq, rsp);
0280         } else {
0281             /*
0282              * Copy the outbound completion struct for non-I/O
0283              * requests.
0284              */
0285             memcpy(&rq->func_rsp, &rsp->func_rsp,
0286                    sizeof(rsp->func_rsp));
0287         }
0288 
0289         /* Queue the request for completion. */
0290         list_add_tail(&rq->comp_list, &comp_list);
0291 
0292     } while (rspget_ptr != rspput_ptr);
0293 
0294     a->last_read = rspget_ptr;
0295     spin_unlock_irqrestore(&a->queue_lock, flags);
0296 
0297     esas2r_comp_list_drain(a, &comp_list);
0298     esas2r_trace_exit();
0299 }
0300 
0301 /*
0302  * Perform all deferred processes for the adapter.  Deferred
0303  * processes can only be done while the current interrupt
0304  * disable_cnt for the adapter is zero.
0305  */
0306 void esas2r_do_deferred_processes(struct esas2r_adapter *a)
0307 {
0308     int startreqs = 2;
0309     struct esas2r_request *rq;
0310     unsigned long flags;
0311 
0312     /*
0313      * startreqs is used to control starting requests
0314      * that are on the deferred queue
0315      *  = 0 - do not start any requests
0316      *  = 1 - can start discovery requests
0317      *  = 2 - can start any request
0318      */
0319 
0320     if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
0321         test_bit(AF_FLASHING, &a->flags))
0322         startreqs = 0;
0323     else if (test_bit(AF_DISC_PENDING, &a->flags))
0324         startreqs = 1;
0325 
0326     atomic_inc(&a->disable_cnt);
0327 
0328     /* Let any pending tasklet work run before starting requests. */
0329 
0330     if (esas2r_is_tasklet_pending(a)) {
0331         esas2r_schedule_tasklet(a);
0332 
0333         startreqs = 0;
0334     }
0335 
0336     /*
0337      * If we can start requests then traverse the defer queue
0338      * looking for requests to start or complete
0339      */
0340     if (startreqs && !list_empty(&a->defer_list)) {
0341         LIST_HEAD(comp_list);
0342         struct list_head *element, *next;
0343 
0344         spin_lock_irqsave(&a->queue_lock, flags);
0345 
0346         list_for_each_safe(element, next, &a->defer_list) {
0347             rq = list_entry(element, struct esas2r_request,
0348                     req_list);
0349 
0350             if (rq->req_stat != RS_PENDING) {
0351                 list_del(element);
0352                 list_add_tail(&rq->comp_list, &comp_list);
0353             }
0354             /*
0355              * Process discovery and OS requests separately.  We
0356              * can't hold up discovery requests when discovery is
0357              * pending.  In general, there may be different sets of
0358              * conditions for starting different types of requests.
0359              */
0360             else if (rq->req_type == RT_DISC_REQ) {
0361                 list_del(element);
0362                 esas2r_disc_local_start_request(a, rq);
0363             } else if (startreqs == 2) {
0364                 list_del(element);
0365                 esas2r_local_start_request(a, rq);
0366 
0367                 /*
0368                  * Flashing could have been set by last local
0369                  * start
0370                  */
0371                 if (test_bit(AF_FLASHING, &a->flags))
0372                     break;
0373             }
0374         }
0375 
0376         spin_unlock_irqrestore(&a->queue_lock, flags);
0377         esas2r_comp_list_drain(a, &comp_list);
0378     }
0379 
0380     atomic_dec(&a->disable_cnt);
0381 }
0382 
0383 /*
0384  * Process an adapter reset (or one that is about to happen)
0385  * by completing any outstanding requests that have not
0386  * already been completed.
0387  */
0388 void esas2r_process_adapter_reset(struct esas2r_adapter *a)
0389 {
0390     struct esas2r_request *rq = &a->general_req;
0391     unsigned long flags;
0392     struct esas2r_disc_context *dc;
0393 
0394     LIST_HEAD(comp_list);
0395     struct list_head *element;
0396 
0397     esas2r_trace_enter();
0398 
0399     spin_lock_irqsave(&a->queue_lock, flags);
0400 
0401     /* Abort the active discovery, if any. */
0402 
0403     if (rq->interrupt_cx) {
0404         dc = (struct esas2r_disc_context *)rq->interrupt_cx;
0405 
0406         dc->disc_evt = 0;
0407 
0408         clear_bit(AF_DISC_IN_PROG, &a->flags);
0409     }
0410 
0411     /*
0412      * Just clear the interrupt callback for now.  It will be dequeued if
0413      * and when we find it on the active queue, and we don't want the
0414      * callback called.  Also set the dummy completion callback in case we
0415      * were doing an I/O request.
0416      */
0417 
0418     rq->interrupt_cx = NULL;
0419     rq->interrupt_cb = NULL;
0420 
0421     rq->comp_cb = esas2r_dummy_complete;
0422 
0423     /* Reset the read and write pointers */
0424 
0425     *a->outbound_copy =
0426         a->last_write =
0427             a->last_read = a->list_size - 1;
0428 
0429     set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
0430 
0431     /* Abort any started requests still on the deferred list */
0432     list_for_each(element, &a->defer_list) {
0433         rq = list_entry(element, struct esas2r_request, req_list);
0434 
0435         if (rq->req_stat == RS_STARTED)
0436             if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
0437                 list_add_tail(&rq->comp_list, &comp_list);
0438     }
0439 
0440     spin_unlock_irqrestore(&a->queue_lock, flags);
0441     esas2r_comp_list_drain(a, &comp_list);
0442     esas2r_process_bus_reset(a);
0443     esas2r_trace_exit();
0444 }
0445 
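/*
 * Handle a bus reset by aborting everything on the deferred queue and
 * then completing the aborted requests outside of the queue lock.
 */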
0446 static void esas2r_process_bus_reset(struct esas2r_adapter *a)
0447 {
0448     struct esas2r_request *rq;
0449     struct list_head *element;
0450     unsigned long flags;
0451 
0452     LIST_HEAD(comp_list);
0453 
0454     esas2r_trace_enter();
0455 
0456     esas2r_hdebug("reset detected");
0457 
0458     spin_lock_irqsave(&a->queue_lock, flags);
0459 
0460     /* kill all the requests on the deferred queue */
0461     list_for_each(element, &a->defer_list) {
0462         rq = list_entry(element, struct esas2r_request, req_list);
0463         if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
0464             list_add_tail(&rq->comp_list, &comp_list);
0465     }
0466 
0467     spin_unlock_irqrestore(&a->queue_lock, flags);
0468 
0469     esas2r_comp_list_drain(a, &comp_list);
0470 
0471     if (atomic_read(&a->disable_cnt) == 0)
0472         esas2r_do_deferred_processes(a);
0473 
0474     clear_bit(AF_OS_RESET, &a->flags);
0475 
0476     esas2r_trace_exit();
0477 }
0478 
0479 static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
0480 {
0481 
0482     clear_bit(AF_CHPRST_NEEDED, &a->flags);
0483     clear_bit(AF_BUSRST_NEEDED, &a->flags);
0484     clear_bit(AF_BUSRST_DETECTED, &a->flags);
0485     clear_bit(AF_BUSRST_PENDING, &a->flags);
0486     /*
0487      * Make sure we don't attempt more than 3 resets
0488      * when the uptime between resets does not exceed one
0489      * minute.  This stops an endless reset loop when
0490      * something is really wrong with the hardware.  The way
0491      * this works is that we start with uptime ticks at 0.
0492      * Each time we do a reset, we add 20 seconds worth to
0493      * the count.  Each time a timer tick occurs, as long
0494      * as a chip reset is not pending, we decrement the
0495      * tick count.  If the uptime ticks ever reach 60
0496      * seconds worth, we disable the adapter from that
0497      * point forward.  Three strikes, you're out.
0498      */
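    /*
     * For example, three resets in quick succession add 3 * 20 = 60
     * seconds worth to the count; unless enough timer ticks have drained
     * it in between, chip_uptime hits the limit and the check below
     * disables the adapter.
     */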
0499     if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
0500                           ESAS2R_CHP_UPTIME_MAX)) {
0501         esas2r_hdebug("*** adapter disabled ***");
0502 
0503         /*
0504          * Ok, some kind of hard failure.  Make sure we
0505          * exit this loop with chip interrupts
0506          * permanently disabled so we don't lock up the
0507          * entire system.  Also flag degraded mode to
0508          * prevent the heartbeat from trying to recover.
0509          */
0510 
0511         set_bit(AF_DEGRADED_MODE, &a->flags);
0512         set_bit(AF_DISABLED, &a->flags);
0513         clear_bit(AF_CHPRST_PENDING, &a->flags);
0514         clear_bit(AF_DISC_PENDING, &a->flags);
0515 
0516         esas2r_disable_chip_interrupts(a);
0517         a->int_mask = 0;
0518         esas2r_process_adapter_reset(a);
0519 
0520         esas2r_log(ESAS2R_LOG_CRIT,
0521                "Adapter disabled because of hardware failure");
0522     } else {
0523         bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
0524 
0525         if (!alrdyrst)
0526             /*
0527              * Only disable interrupts if this is
0528              * the first reset attempt.
0529              */
0530             esas2r_disable_chip_interrupts(a);
0531 
0532         if ((test_bit(AF_POWER_MGT, &a->flags)) &&
0533             !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
0534             /*
0535              * Don't reset the chip on the first
0536              * deferred power up attempt.
0537              */
0538         } else {
0539             esas2r_hdebug("*** resetting chip ***");
0540             esas2r_reset_chip(a);
0541         }
0542 
0543         /* Kick off the reinitialization */
0544         a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
0545         a->chip_init_time = jiffies_to_msecs(jiffies);
0546         if (!test_bit(AF_POWER_MGT, &a->flags)) {
0547             esas2r_process_adapter_reset(a);
0548 
0549             if (!alrdyrst) {
0550                 /* Remove devices now that I/O is cleaned up. */
0551                 a->prev_dev_cnt =
0552                     esas2r_targ_db_get_tgt_cnt(a);
0553                 esas2r_targ_db_remove_all(a, false);
0554             }
0555         }
0556 
0557         a->int_mask = 0;
0558     }
0559 }
0560 
0561 static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
0562 {
0563     while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
0564         /*
0565          * Balance the enable in esas2r_init_adapter_hw.
0566          * esas2r_power_down already took care of it for power
0567          * management.
0568          */
0569         if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
0570             !test_bit(AF_POWER_MGT, &a->flags))
0571             esas2r_disable_chip_interrupts(a);
0572 
0573         /* Reinitialize the chip. */
0574         esas2r_check_adapter(a);
0575         esas2r_init_adapter_hw(a, 0);
0576 
0577         if (test_bit(AF_CHPRST_NEEDED, &a->flags))
0578             break;
0579 
0580         if (test_bit(AF_POWER_MGT, &a->flags)) {
0581             /* Recovery from power management. */
0582             if (test_bit(AF_FIRST_INIT, &a->flags)) {
0583                 /* Chip reset during normal power up */
0584                 esas2r_log(ESAS2R_LOG_CRIT,
0585                        "The firmware was reset during a normal power-up sequence");
0586             } else {
0587                 /* Deferred power up complete. */
0588                 clear_bit(AF_POWER_MGT, &a->flags);
0589                 esas2r_send_reset_ae(a, true);
0590             }
0591         } else {
0592             /* Recovery from online chip reset. */
0593             if (test_bit(AF_FIRST_INIT, &a->flags)) {
0594                 /* Chip reset during driver load */
0595             } else {
0596                 /* Chip reset after driver load */
0597                 esas2r_send_reset_ae(a, false);
0598             }
0599 
0600             esas2r_log(ESAS2R_LOG_CRIT,
0601                    "Recovering from a chip reset while the chip was online");
0602         }
0603 
0604         clear_bit(AF_CHPRST_STARTED, &a->flags);
0605         esas2r_enable_chip_interrupts(a);
0606 
0607         /*
0608          * Clear this flag last!  This indicates that the chip has been
0609          * reset already during initialization.
0610          */
0611         clear_bit(AF_CHPRST_DETECTED, &a->flags);
0612     }
0613 }
0614 
0615 
0616 /* Perform deferred tasks when chip interrupts are disabled */
0617 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
0618 {
0619 
0620     if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
0621         test_bit(AF_CHPRST_DETECTED, &a->flags)) {
0622         if (test_bit(AF_CHPRST_NEEDED, &a->flags))
0623             esas2r_chip_rst_needed_during_tasklet(a);
0624 
0625         esas2r_handle_chip_rst_during_tasklet(a);
0626     }
0627 
0628     if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
0629         esas2r_hdebug("hard resetting bus");
0630 
0631         clear_bit(AF_BUSRST_NEEDED, &a->flags);
0632 
0633         if (test_bit(AF_FLASHING, &a->flags))
0634             set_bit(AF_BUSRST_DETECTED, &a->flags);
0635         else
0636             esas2r_write_register_dword(a, MU_DOORBELL_IN,
0637                             DRBL_RESET_BUS);
0638     }
0639 
0640     if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
0641         esas2r_process_bus_reset(a);
0642 
0643         esas2r_log_dev(ESAS2R_LOG_WARN,
0644                    &(a->host->shost_gendev),
0645                    "scsi_report_bus_reset() called");
0646 
0647         scsi_report_bus_reset(a->host, 0);
0648 
0649         clear_bit(AF_BUSRST_DETECTED, &a->flags);
0650         clear_bit(AF_BUSRST_PENDING, &a->flags);
0651 
0652         esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
0653     }
0654 
0655     if (test_bit(AF_PORT_CHANGE, &a->flags)) {
0656         clear_bit(AF_PORT_CHANGE, &a->flags);
0657 
0658         esas2r_targ_db_report_changes(a);
0659     }
0660 
0661     if (atomic_read(&a->disable_cnt) == 0)
0662         esas2r_do_deferred_processes(a);
0663 }
0664 
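/*
 * Decode and acknowledge an outbound doorbell: note a detected bus reset,
 * acknowledge a forced interrupt by clearing the heartbeat flag, log a
 * firmware panic, and locally reset the adapter if the firmware asked
 * for a reset.
 */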
0665 static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
0666 {
0667     if (!(doorbell & DRBL_FORCE_INT)) {
0668         esas2r_trace_enter();
0669         esas2r_trace("doorbell: %x", doorbell);
0670     }
0671 
0672     /* First clear the doorbell bits */
0673     esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
0674 
0675     if (doorbell & DRBL_RESET_BUS)
0676         set_bit(AF_BUSRST_DETECTED, &a->flags);
0677 
0678     if (doorbell & DRBL_FORCE_INT)
0679         clear_bit(AF_HEARTBEAT, &a->flags);
0680 
0681     if (doorbell & DRBL_PANIC_REASON_MASK) {
0682         esas2r_hdebug("*** Firmware Panic ***");
0683         esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
0684     }
0685 
0686     if (doorbell & DRBL_FW_RESET) {
0687         set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
0688         esas2r_local_reset_adapter(a);
0689     }
0690 
0691     if (!(doorbell & DRBL_FORCE_INT)) {
0692         esas2r_trace_exit();
0693     }
0694 }
0695 
0696 void esas2r_force_interrupt(struct esas2r_adapter *a)
0697 {
0698     esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
0699                     DRBL_DRV_VER);
0700 }
0701 
0702 
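/*
 * Translate a LUN asynchronous event into a new target state and, when
 * the state is meaningful, save the event and queue a device-change
 * discovery event.
 */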
0703 static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
0704                  u16 target, u32 length)
0705 {
0706     struct esas2r_target *t = a->targetdb + target;
0707     u32 cplen = length;
0708     unsigned long flags;
0709 
0710     if (cplen > sizeof(t->lu_event))
0711         cplen = sizeof(t->lu_event);
0712 
0713     esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
0714     esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
0715 
0716     spin_lock_irqsave(&a->mem_lock, flags);
0717 
0718     t->new_target_state = TS_INVALID;
0719 
0720     if (ae->lu.dwevent  & VDAAE_LU_LOST) {
0721         t->new_target_state = TS_NOT_PRESENT;
0722     } else {
0723         switch (ae->lu.bystate) {
0724         case VDAAE_LU_NOT_PRESENT:
0725         case VDAAE_LU_OFFLINE:
0726         case VDAAE_LU_DELETED:
0727         case VDAAE_LU_FACTORY_DISABLED:
0728             t->new_target_state = TS_NOT_PRESENT;
0729             break;
0730 
0731         case VDAAE_LU_ONLINE:
0732         case VDAAE_LU_DEGRADED:
0733             t->new_target_state = TS_PRESENT;
0734             break;
0735         }
0736     }
0737 
0738     if (t->new_target_state != TS_INVALID) {
0739         memcpy(&t->lu_event, &ae->lu, cplen);
0740 
0741         esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
0742     }
0743 
0744     spin_unlock_irqrestore(&a->mem_lock, flags);
0745 }
0746 
0747 
0748 
0749 void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
0750 {
0751     union atto_vda_ae *ae =
0752         (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
0753     u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
0754     union atto_vda_ae *last =
0755         (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
0756                       + length);
0757 
0758     esas2r_trace_enter();
0759     esas2r_trace("length: %d", length);
0760 
0761     if (length > sizeof(struct atto_vda_ae_data)
0762         || (length & 3) != 0
0763         || length == 0) {
0764         esas2r_log(ESAS2R_LOG_WARN,
0765                "The AE request response length is too long (rq: %p, length: %d)",
0766                rq, length);
0767 
0768         esas2r_hdebug("aereq->length (0x%x) too long", length);
0769         esas2r_bugon();
0770 
0771         last = ae;
0772     }
0773 
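    /*
     * Walk the packed, variable-length event records; each header's
     * bylength field gives the size of its record.
     */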
0774     while (ae < last) {
0775         u16 target;
0776 
0777         esas2r_trace("ae: %p", ae);
0778         esas2r_trace("ae->hdr: %p", &(ae->hdr));
0779 
0780         length = ae->hdr.bylength;
0781 
0782         if (length > (u32)((u8 *)last - (u8 *)ae)
0783             || (length & 3) != 0
0784             || length == 0) {
0785             esas2r_log(ESAS2R_LOG_CRIT,
0786                    "the async event length is invalid (%p): %d",
0787                    ae, length);
0788 
0789             esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
0790             esas2r_bugon();
0791 
0792             break;
0793         }
0794 
0795         esas2r_nuxi_ae_data(ae);
0796 
0797         esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
0798                       sizeof(union atto_vda_ae));
0799 
0800         switch (ae->hdr.bytype) {
0801         case VDAAE_HDR_TYPE_RAID:
0802 
0803             if (ae->raid.dwflags & (VDAAE_GROUP_STATE
0804                         | VDAAE_RBLD_STATE
0805                         | VDAAE_MEMBER_CHG
0806                         | VDAAE_PART_CHG)) {
0807                 esas2r_log(ESAS2R_LOG_INFO,
0808                        "RAID event received - name:%s rebuild_state:%d group_state:%d",
0809                        ae->raid.acname,
0810                        ae->raid.byrebuild_state,
0811                        ae->raid.bygroup_state);
0812             }
0813 
0814             break;
0815 
0816         case VDAAE_HDR_TYPE_LU:
0817             esas2r_log(ESAS2R_LOG_INFO,
0818                    "LUN event received: event:%d target_id:%d LUN:%d state:%d",
0819                    ae->lu.dwevent,
0820                    ae->lu.id.tgtlun.wtarget_id,
0821                    ae->lu.id.tgtlun.bylun,
0822                    ae->lu.bystate);
0823 
0824             target = ae->lu.id.tgtlun.wtarget_id;
0825 
0826             if (target < ESAS2R_MAX_TARGETS)
0827                 esas2r_lun_event(a, ae, target, length);
0828 
0829             break;
0830 
0831         case VDAAE_HDR_TYPE_DISK:
0832             esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
0833             break;
0834 
0835         default:
0836 
0837             /* Silently ignore the rest and let the apps deal with
0838              * them.
0839              */
0840 
0841             break;
0842         }
0843 
0844         ae = (union atto_vda_ae *)((u8 *)ae + length);
0845     }
0846 
0847     /* Now requeue it. */
0848     esas2r_start_ae_request(a, rq);
0849     esas2r_trace_exit();
0850 }
0851 
0852 /* Send an asynchronous event for a chip reset or power management. */
0853 void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
0854 {
0855     struct atto_vda_ae_hdr ae;
0856 
0857     if (pwr_mgt)
0858         ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
0859     else
0860         ae.bytype = VDAAE_HDR_TYPE_RESET;
0861 
0862     ae.byversion = VDAAE_HDR_VER_0;
0863     ae.byflags = 0;
0864     ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
0865 
0866     if (pwr_mgt) {
0867         esas2r_hdebug("*** sending power management AE ***");
0868     } else {
0869         esas2r_hdebug("*** sending reset AE ***");
0870     }
0871 
0872     esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
0873                   sizeof(union atto_vda_ae));
0874 }
0875 
0876 void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
0877 {}
0878 
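/*
 * Copy any sense data returned with a failed SCSI request into the
 * caller's sense buffer and check the additional sense code for a
 * REPORTED LUNS DATA HAS CHANGED indication.
 */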
0879 static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
0880                        struct esas2r_request *rq)
0881 {
0882     u8 snslen, snslen2;
0883 
0884     snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
0885 
0886     if (snslen > rq->sense_len)
0887         snslen = rq->sense_len;
0888 
0889     if (snslen) {
0890         if (rq->sense_buf)
0891             memcpy(rq->sense_buf, rq->data_buf, snslen);
0892         else
0893             rq->sense_buf = (u8 *)rq->data_buf;
0894 
0895         /* Check for additional sense data (ASC/ASCQ bytes present) */
0896         if (snslen2 > 0x0c) {
0897             u8 *s = (u8 *)rq->data_buf;
0898 
0899             esas2r_trace_enter();
0900 
0901             /* REPORTED LUNS DATA HAS CHANGED (ASC 0x3f, ASCQ 0x0e) */
0902             if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
0903                 esas2r_trace("rq->target_id: %d",
0904                          rq->target_id);
0905                 esas2r_target_state_changed(a, rq->target_id,
0906                                 TS_LUN_CHANGE);
0907             }
0908 
0909             esas2r_trace("add_sense_key=%x", s[0x0c]);
0910             esas2r_trace("add_sense_qual=%x", s[0x0d]);
0911             esas2r_trace_exit();
0912         }
0913     }
0914 
0915     rq->sense_len = snslen;
0916 }
0917 
0918 
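/*
 * Final completion processing: clear the flashing flag when a flash
 * commit finishes, run any interrupt callback (which may restart the
 * request), collect sense data and log failed SCSI requests, and then
 * call the completion callback.
 */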
0919 void esas2r_complete_request(struct esas2r_adapter *a,
0920                  struct esas2r_request *rq)
0921 {
0922     if (rq->vrq->scsi.function == VDA_FUNC_FLASH
0923         && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
0924         clear_bit(AF_FLASHING, &a->flags);
0925 
0926     /* See if we set up a callback to do special processing */
0927 
0928     if (rq->interrupt_cb) {
0929         (*rq->interrupt_cb)(a, rq);
0930 
0931         if (rq->req_stat == RS_PENDING) {
0932             esas2r_start_request(a, rq);
0933             return;
0934         }
0935     }
0936 
0937     if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
0938         && unlikely(rq->req_stat != RS_SUCCESS)) {
0939         esas2r_check_req_rsp_sense(a, rq);
0940         esas2r_log_request_failure(a, rq);
0941     }
0942 
0943     (*rq->comp_cb)(a, rq);
0944 }