// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"

#define enable_tsend_auto_resp(efct)    1
#define enable_treceive_auto_resp(efct) 0

#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"

#define scsi_io_printf(io, fmt, ...) \
    efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
        io->node->display_name, io->instance_index,\
        io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

#define EFCT_LOG_ENABLE_SCSI_TRACE(efct)                \
        (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)

#define scsi_io_trace(io, fmt, ...) \
    do { \
        if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
            scsi_io_printf(io, fmt, ##__VA_ARGS__); \
    } while (0)
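
/*
 * Usage sketch (illustrative, not part of the driver): with bit 2 set in
 * efct->logmask, scsi_io_trace() emits one tagged debug line per call,
 * e.g. from a completion path:
 *
 *     scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
 *
 * With the bit clear, the macro reduces to the logmask test alone.
 */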

struct efct_io *
efct_scsi_io_alloc(struct efct_node *node)
{
    struct efct *efct;
    struct efct_xport *xport;
    struct efct_io *io;
    unsigned long flags;

    efct = node->efct;
    xport = efct->xport;

    io = efct_io_pool_io_alloc(efct->xport->io_pool);
    if (!io) {
        efc_log_err(efct, "IO alloc failed\n");
        atomic_add_return(1, &xport->io_alloc_failed_count);
        return NULL;
    }

    /* initialize refcount */
    kref_init(&io->ref);
    io->release = _efct_scsi_io_free;

    /* set generic fields */
    io->efct = efct;
    io->node = node;
    kref_get(&node->ref);

    /* set type and name */
    io->io_type = EFCT_IO_TYPE_IO;
    io->display_name = "scsi_io";

    io->cmd_ini = false;
    io->cmd_tgt = true;

    /* Add to node's active_ios list */
    INIT_LIST_HEAD(&io->list_entry);
    spin_lock_irqsave(&node->active_ios_lock, flags);
    list_add(&io->list_entry, &node->active_ios);
    spin_unlock_irqrestore(&node->active_ios_lock, flags);

    return io;
}
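
/*
 * Example (illustrative sketch, not part of the driver): a backend would
 * pair efct_scsi_io_alloc() with efct_scsi_io_free(); the embedded kref
 * drops the node reference and returns the IO to the pool on final put.
 * efct_example_io_lifetime() is a hypothetical helper.
 */
static int __maybe_unused
efct_example_io_lifetime(struct efct_node *node)
{
    struct efct_io *io;

    io = efct_scsi_io_alloc(node);  /* takes a reference on node */
    if (!io)
        return -ENOMEM;

    /* ... run the command phases on the IO ... */

    efct_scsi_io_free(io);          /* kref_put -> _efct_scsi_io_free */
    return 0;
}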

void
_efct_scsi_io_free(struct kref *arg)
{
    struct efct_io *io = container_of(arg, struct efct_io, ref);
    struct efct *efct = io->efct;
    struct efct_node *node = io->node;
    unsigned long flags = 0;

    scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

    if (io->io_free) {
        efc_log_err(efct, "IO already freed.\n");
        return;
    }

    spin_lock_irqsave(&node->active_ios_lock, flags);
    list_del_init(&io->list_entry);
    spin_unlock_irqrestore(&node->active_ios_lock, flags);

    kref_put(&node->ref, node->release);
    io->node = NULL;
    efct_io_pool_io_free(efct->xport->io_pool, io);
}

void
efct_scsi_io_free(struct efct_io *io)
{
    scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
    WARN_ON(!refcount_read(&io->ref.refcount));
    kref_put(&io->ref, io->release);
}

static void
efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
          u32 ext_status, void *app)
{
    u32 flags = 0;
    struct efct_io *io = app;
    struct efct *efct;
    enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
    efct_scsi_io_cb_t cb;

    if (!io || !io->efct) {
        pr_err("%s: IO cannot be NULL\n", __func__);
        return;
    }

    scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

    efct = io->efct;

    io->transferred += length;

    if (!io->scsi_tgt_cb) {
        efct_scsi_check_pending(efct);
        return;
    }

    /* Call target server completion */
    cb = io->scsi_tgt_cb;

    /* Clear the callback before invoking it */
    io->scsi_tgt_cb = NULL;

    /* if status was good and auto-good-response was set, then call the
     * target server back with IO_CMPL_RSP_SENT, otherwise send IO_CMPL
     */
    if (status == 0 && io->auto_resp)
        flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
    else
        flags |= EFCT_SCSI_IO_CMPL;

    switch (status) {
    case SLI4_FC_WCQE_STATUS_SUCCESS:
        scsi_stat = EFCT_SCSI_STATUS_GOOD;
        break;
    case SLI4_FC_WCQE_STATUS_DI_ERROR:
        if (ext_status & SLI4_FC_DI_ERROR_GE)
            scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
        else if (ext_status & SLI4_FC_DI_ERROR_AE)
            scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
        else if (ext_status & SLI4_FC_DI_ERROR_RE)
            scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
        else
            scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
        break;
    case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
        switch (ext_status) {
        case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
        case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
            scsi_stat = EFCT_SCSI_STATUS_ABORTED;
            break;
        case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
            scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
            break;
        case SLI4_FC_LOCAL_REJECT_NO_XRI:
            scsi_stat = EFCT_SCSI_STATUS_NO_IO;
            break;
        default:
            /* we have seen 0x0d (TX_DMA_FAILED err) */
            scsi_stat = EFCT_SCSI_STATUS_ERROR;
            break;
        }
        break;

    case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
        /* target IO timed out */
        scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
        break;

    case SLI4_FC_WCQE_STATUS_SHUTDOWN:
        /* Target IO cancelled by HW */
        scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
        break;

    default:
        scsi_stat = EFCT_SCSI_STATUS_ERROR;
        break;
    }

    cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);

    efct_scsi_check_pending(efct);
}

static int
efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
             struct efct_scsi_sgl *sgl, u32 sgl_count,
             enum efct_hw_io_type type)
{
    int rc;
    u32 i;
    struct efct *efct = hw->os;

    /* Initialize HW SGL */
    rc = efct_hw_io_init_sges(hw, hio, type);
    if (rc) {
        efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
        return -EIO;
    }

    for (i = 0; i < sgl_count; i++) {
        /* Add data SGE */
        rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
        if (rc) {
            efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
                    sgl_count, rc);
            return rc;
        }
    }

    return 0;
}
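
/*
 * Example (illustrative sketch, not part of the driver): callers describe
 * payload buffers with an efct_scsi_sgl array; dispatch later converts it
 * to HW SGEs through efct_scsi_build_sgls(). efct_example_fill_sgl() is a
 * hypothetical helper, and struct efc_dma is used as with io->rspbuf below.
 */
static void __maybe_unused
efct_example_fill_sgl(struct efct_io *io, struct efc_dma *buf, size_t len)
{
    io->sgl[0].addr = buf->phys;    /* DMA address of the payload */
    io->sgl[0].dif_addr = 0;        /* no separate DIF buffer */
    io->sgl[0].len = len;
    io->sgl_count = 1;
}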

static void efc_log_sgl(struct efct_io *io)
{
    struct efct_hw_io *hio = io->hio;
    struct sli4_sge *data = NULL;
    u32 *dword = NULL;
    u32 i;
    u32 n_sge;

    scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
              upper_32_bits(hio->def_sgl.phys),
              lower_32_bits(hio->def_sgl.phys));
    n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
    for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
        dword = (u32 *)data;

        scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
                  i, dword[0], dword[1], dword[2], dword[3]);

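        /* dword[2] bit 31 is presumably the SLI-4 "last SGE" flag;
         * stop dumping at the end of the chain
         */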
        if (dword[2] & (1U << 31))
            break;
    }
}

static void
efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
                 u8 *mqe, void *arg)
{
    struct efct_io *io = arg;

    if (io) {
        efct_hw_done_t cb = io->hw_cb;

        if (!io->hw_cb)
            return;

        io->hw_cb = NULL;
        (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
    }
}

static int
efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
{
    int rc = 0;
    struct efct *efct = io->efct;

    /* Got a HW IO;
     * update ini/tgt_task_tag with HW IO info and dispatch
     */
    io->hio = hio;
    if (io->cmd_tgt)
        io->tgt_task_tag = hio->indicator;
    else if (io->cmd_ini)
        io->init_task_tag = hio->indicator;
    io->hw_tag = hio->reqtag;

    hio->eq = io->hw_priv;

    /* Copy WQ steering */
    switch (io->wq_steering) {
    case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
        hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
        break;
    case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
        hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
        break;
    case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
        hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
        break;
    }

    switch (io->io_type) {
    case EFCT_IO_TYPE_IO:
        rc = efct_scsi_build_sgls(&efct->hw, io->hio,
                      io->sgl, io->sgl_count, io->hio_type);
        if (rc)
            break;

        if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
            efc_log_sgl(io);

        if (io->app_id)
            io->iparam.fcp_tgt.app_id = io->app_id;

        io->iparam.fcp_tgt.vpi = io->node->vpi;
        io->iparam.fcp_tgt.rpi = io->node->rpi;
        io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
        io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
        io->iparam.fcp_tgt.xmit_len = io->wire_len;

        rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
                     &io->iparam, io->hw_cb, io);
        break;
    default:
        scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
        rc = -EIO;
        break;
    }
    return rc;
}
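
/*
 * Usage note (illustrative): the steering choice originates in the
 * SCSI-level flags passed by the target server. efct_scsi_xfer_data() and
 * efct_scsi_send_resp() shift the EFCT_SCSI_WQ_STEERING_MASK bits into
 * io->wq_steering, which the switch above maps to the HW encoding; e.g.
 * EFCT_SCSI_WQ_STEERING_CPU in the flags yields EFCT_HW_WQ_STEERING_CPU.
 */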

static int
efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
{
    int rc;

    switch (io->io_type) {
    case EFCT_IO_TYPE_ABORT: {
        struct efct_hw_io *hio_to_abort = NULL;

        hio_to_abort = io->io_to_abort->hio;

        if (!hio_to_abort) {
            /*
             * If the IO to abort does not have an associated
             * HW IO, immediately make the callback with
             * success. The command must have been sent to
             * the backend, but the data phase has not yet
             * started, so we don't have a HW IO.
             *
             * Note: since the backend shims should be
             * taking a reference on io_to_abort, it should not
             * be possible for it to have been completed and
             * freed by the backend before the abort got here.
             */
            scsi_io_printf(io, "IO: not active\n");
            ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
                    SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
            rc = 0;
            break;
        }

        /* HW IO is valid, abort it */
        scsi_io_printf(io, "aborting\n");
        rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
                      io->send_abts, io->hw_cb, io);
        if (rc) {
            int status = SLI4_FC_WCQE_STATUS_SUCCESS;
            efct_hw_done_t cb = io->hw_cb;

            if (rc != -ENOENT && rc != -EINPROGRESS) {
                status = -1;
                scsi_io_printf(io, "Failed to abort IO rc=%d\n",
                           rc);
            }
            cb(io->hio, 0, status, 0, io);
            rc = 0;
        }

        break;
    }
    default:
        scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
        rc = -EIO;
        break;
    }
    return rc;
}

static struct efct_io *
efct_scsi_dispatch_pending(struct efct *efct)
{
    struct efct_xport *xport = efct->xport;
    struct efct_io *io = NULL;
    struct efct_hw_io *hio;
    unsigned long flags = 0;
    int status;

    spin_lock_irqsave(&xport->io_pending_lock, flags);

    if (!list_empty(&xport->io_pending_list)) {
        io = list_first_entry(&xport->io_pending_list, struct efct_io,
                      io_pending_link);
        list_del_init(&io->io_pending_link);
    }

    if (!io) {
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);
        return NULL;
    }

    if (io->io_type == EFCT_IO_TYPE_ABORT) {
        hio = NULL;
    } else {
        hio = efct_hw_io_alloc(&efct->hw);
        if (!hio) {
            /*
             * No HW IO available. Put the IO back on
             * the front of the pending list
             */
            list_add(&io->io_pending_link, &xport->io_pending_list);
            io = NULL;
        } else {
            hio->eq = io->hw_priv;
        }
    }

    /* Must drop the lock before dispatching the IO */
    spin_unlock_irqrestore(&xport->io_pending_lock, flags);

    if (!io)
        return NULL;

    /*
     * We pulled an IO off the pending list,
     * and either got a HW IO or don't need one
     */
    atomic_sub_return(1, &xport->io_pending_count);
    if (!hio)
        status = efct_scsi_io_dispatch_no_hw_io(io);
    else
        status = efct_scsi_io_dispatch_hw_io(io, hio);
    if (status) {
        /*
         * Invoke the HW callback, but do so in a separate
         * execution context, provided by the NOP mailbox
         * completion processing, by using efct_hw_async_call()
         */
        if (efct_hw_async_call(&efct->hw,
                       efct_scsi_check_pending_async_cb, io)) {
            efc_log_debug(efct, "call hw async failed\n");
        }
    }

    return io;
}

void
efct_scsi_check_pending(struct efct *efct)
{
    struct efct_xport *xport = efct->xport;
    struct efct_io *io = NULL;
    int count = 0;
    unsigned long flags = 0;
    int dispatch = 0;

    /*
     * Guard against recursion: atomic_add_return() returns the new
     * value, so anything other than 1 means another invocation is
     * already running
     */
    if (atomic_add_return(1, &xport->io_pending_recursing) != 1) {
        /* This function is already running. Decrement and return. */
        atomic_sub_return(1, &xport->io_pending_recursing);
        return;
    }

    while (efct_scsi_dispatch_pending(efct))
        count++;

    if (count) {
        atomic_sub_return(1, &xport->io_pending_recursing);
        return;
    }

    /*
     * If nothing was removed from the list,
     * we might be in a case where we need to abort an
     * active IO and the abort is on the pending list.
     * Look for an abort we can dispatch.
     */
    spin_lock_irqsave(&xport->io_pending_lock, flags);

    list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
        if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
            /* This IO has a HW IO, so it is
             * active. Dispatch the abort.
             */
            dispatch = 1;
            list_del_init(&io->io_pending_link);
            atomic_sub_return(1, &xport->io_pending_count);
            break;
        }
    }

    spin_unlock_irqrestore(&xport->io_pending_lock, flags);

    if (dispatch) {
        if (efct_scsi_io_dispatch_no_hw_io(io)) {
            if (efct_hw_async_call(&efct->hw,
                efct_scsi_check_pending_async_cb, io)) {
                efc_log_debug(efct, "hw async failed\n");
            }
        }
    }

    atomic_sub_return(1, &xport->io_pending_recursing);
}

int
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
{
    struct efct_hw_io *hio;
    struct efct *efct = io->efct;
    struct efct_xport *xport = efct->xport;
    unsigned long flags = 0;

    io->hw_cb = cb;

    /*
     * If this IO already has a HW IO, then this is not the first
     * phase of the IO. Send it to the HW.
     */
    if (io->hio)
        return efct_scsi_io_dispatch_hw_io(io, io->hio);

    /*
     * We don't already have a HW IO associated with the IO. First check
     * the pending list. If not empty, add IO to the tail and process the
     * pending list.
     */
    spin_lock_irqsave(&xport->io_pending_lock, flags);
    if (!list_empty(&xport->io_pending_list)) {
        /*
         * If this is a low latency request, then put it at the
         * front of the IO pending queue, otherwise put it at the
         * end of the queue.
         */
        if (io->low_latency) {
            INIT_LIST_HEAD(&io->io_pending_link);
            list_add(&io->io_pending_link, &xport->io_pending_list);
        } else {
            INIT_LIST_HEAD(&io->io_pending_link);
            list_add_tail(&io->io_pending_link,
                      &xport->io_pending_list);
        }
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);
        atomic_add_return(1, &xport->io_pending_count);
        atomic_add_return(1, &xport->io_total_pending);

        /* process pending list */
        efct_scsi_check_pending(efct);
        return 0;
    }
    spin_unlock_irqrestore(&xport->io_pending_lock, flags);

    /*
     * We don't have a HW IO associated with the IO and there's nothing
     * on the pending list. Attempt to allocate a HW IO and dispatch it.
     */
    hio = efct_hw_io_alloc(&io->efct->hw);
    if (!hio) {
        /* Couldn't get a HW IO. Save this IO on the pending list */
        spin_lock_irqsave(&xport->io_pending_lock, flags);
        INIT_LIST_HEAD(&io->io_pending_link);
        list_add_tail(&io->io_pending_link, &xport->io_pending_list);
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);

        atomic_add_return(1, &xport->io_total_pending);
        atomic_add_return(1, &xport->io_pending_count);
        return 0;
    }

    /* We successfully allocated a HW IO; dispatch to HW */
    return efct_scsi_io_dispatch_hw_io(io, hio);
}

int
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
{
    struct efct *efct = io->efct;
    struct efct_xport *xport = efct->xport;
    unsigned long flags = 0;

    io->hw_cb = cb;

    /*
     * For aborts, we don't need a HW IO, but we still want
     * to pass through the pending list to preserve ordering.
     * Thus, if the pending list is not empty, add this abort
     * to the pending list and process the pending list.
     */
    spin_lock_irqsave(&xport->io_pending_lock, flags);
    if (!list_empty(&xport->io_pending_list)) {
        INIT_LIST_HEAD(&io->io_pending_link);
        list_add_tail(&io->io_pending_link, &xport->io_pending_list);
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);
        atomic_add_return(1, &xport->io_pending_count);
        atomic_add_return(1, &xport->io_total_pending);

        /* process pending list */
        efct_scsi_check_pending(efct);
        return 0;
    }
    spin_unlock_irqrestore(&xport->io_pending_lock, flags);

    /* nothing on pending list, dispatch abort */
    return efct_scsi_io_dispatch_no_hw_io(io);
}

static inline int
efct_scsi_xfer_data(struct efct_io *io, u32 flags,
            struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
            enum efct_hw_io_type type, int enable_ar,
            efct_scsi_io_cb_t cb, void *arg)
{
    struct efct *efct;
    size_t residual = 0;

    io->sgl_count = sgl_count;

    efct = io->efct;

    scsi_io_trace(io, "%s wire_len %llu\n",
              (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
              xwire_len);

    io->hio_type = type;

    io->scsi_tgt_cb = cb;
    io->scsi_tgt_cb_arg = arg;

    residual = io->exp_xfer_len - io->transferred;
    io->wire_len = (xwire_len < residual) ? xwire_len : residual;
    residual = (xwire_len - io->wire_len);

    memset(&io->iparam, 0, sizeof(io->iparam));
    io->iparam.fcp_tgt.ox_id = io->init_task_tag;
    io->iparam.fcp_tgt.offset = io->transferred;
    io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
    io->iparam.fcp_tgt.timeout = io->timeout;

    /* if this is the last data phase and there is no residual, enable
     * auto-good-response
     */
    if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
        ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
        (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
        io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
        io->auto_resp = true;
    } else {
        io->auto_resp = false;
    }

    /* save this transfer length */
    io->xfer_req = io->wire_len;

    /* Adjust the transferred count to account for overrun
     * when the residual is calculated in efct_scsi_send_resp
     */
    io->transferred += residual;

    /* Adjust the SGL size if there is overrun */
    if (residual) {
        struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];

        while (residual) {
            size_t len = sgl_ptr->len;

            if (len > residual) {
                sgl_ptr->len = len - residual;
                residual = 0;
            } else {
                sgl_ptr->len = 0;
                residual -= len;
                io->sgl_count--;
            }
            sgl_ptr--;
        }
    }

    /* Set latency and WQ steering */
    io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
    io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
                EFCT_SCSI_WQ_STEERING_SHIFT;
    io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
                EFCT_SCSI_WQ_CLASS_SHIFT;

    if (efct->xport) {
        struct efct_xport *xport = efct->xport;

        if (type == EFCT_HW_IO_TARGET_READ) {
            xport->fcp_stats.input_requests++;
            xport->fcp_stats.input_bytes += xwire_len;
        } else if (type == EFCT_HW_IO_TARGET_WRITE) {
            xport->fcp_stats.output_requests++;
            xport->fcp_stats.output_bytes += xwire_len;
        }
    }
    return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
               struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
               efct_scsi_io_cb_t cb, void *arg)
{
    return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
                   len, EFCT_HW_IO_TARGET_READ,
                   enable_tsend_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
               struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
               efct_scsi_io_cb_t cb, void *arg)
{
    return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
                   EFCT_HW_IO_TARGET_WRITE,
                   enable_treceive_auto_resp(io->efct), cb, arg);
}
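
/*
 * Example (illustrative sketch, not part of the driver): sending read data
 * to the initiator. The callback signature and int return follow
 * efct_scsi_io_cb_t as invoked by efct_target_io_cb() above; the
 * efct_example_* names are hypothetical.
 */
static int __maybe_unused
efct_example_rd_done(struct efct_io *io, enum efct_scsi_io_status status,
             u32 flags, void *arg)
{
    if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
        /* auto-good-response already went out; just release the IO */
        efct_scsi_io_free(io);
        return 0;
    }

    /* an explicit response phase would follow here, e.g. via
     * efct_scsi_send_resp()
     */
    return 0;
}

static int __maybe_unused
efct_example_send_data(struct efct_io *io, u64 len)
{
    /* single, final data phase; allow auto-good-response */
    return efct_scsi_send_rd_data(io, EFCT_SCSI_LAST_DATAPHASE,
                      io->sgl, io->sgl_count, len,
                      efct_example_rd_done, NULL);
}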

int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
            struct efct_scsi_cmd_resp *rsp,
            efct_scsi_io_cb_t cb, void *arg)
{
    struct efct *efct;
    int residual;
    /* Always try auto resp */
    bool auto_resp = true;
    u8 scsi_status = 0;
    u16 scsi_status_qualifier = 0;
    u8 *sense_data = NULL;
    u32 sense_data_length = 0;

    efct = io->efct;

    if (rsp) {
        scsi_status = rsp->scsi_status;
        scsi_status_qualifier = rsp->scsi_status_qualifier;
        sense_data = rsp->sense_data;
        sense_data_length = rsp->sense_data_length;
        residual = rsp->residual;
    } else {
        residual = io->exp_xfer_len - io->transferred;
    }

    io->wire_len = 0;
    io->hio_type = EFCT_HW_IO_TARGET_RSP;

    io->scsi_tgt_cb = cb;
    io->scsi_tgt_cb_arg = arg;

    memset(&io->iparam, 0, sizeof(io->iparam));
    io->iparam.fcp_tgt.ox_id = io->init_task_tag;
    io->iparam.fcp_tgt.offset = 0;
    io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
    io->iparam.fcp_tgt.timeout = io->timeout;

    /* Set low latency queueing request */
    io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
    io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
                EFCT_SCSI_WQ_STEERING_SHIFT;
    io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
                EFCT_SCSI_WQ_CLASS_SHIFT;

    if (scsi_status != 0 || residual || sense_data_length) {
        struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
        u8 *sns_data;

        if (!fcprsp) {
            efc_log_err(efct, "NULL response buffer\n");
            return -EIO;
        }

        sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);

        auto_resp = false;

        memset(fcprsp, 0, sizeof(*fcprsp));

        io->wire_len += sizeof(*fcprsp);

        fcprsp->resp.fr_status = scsi_status;
        fcprsp->resp.fr_retry_delay =
            cpu_to_be16(scsi_status_qualifier);

        /* set residual status if necessary */
        if (residual != 0) {
            /* FCP: if data transferred is less than the
             * amount expected, then this is an underflow.
             * If data transferred would have been greater
             * than the amount expected, this is an overflow
             */
            if (residual > 0) {
                fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
                fcprsp->ext.fr_resid = cpu_to_be32(residual);
            } else {
                fcprsp->resp.fr_flags |= FCP_RESID_OVER;
                fcprsp->ext.fr_resid = cpu_to_be32(-residual);
            }
        }

        if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
            if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
                efc_log_err(efct, "Sense exceeds max size.\n");
                return -EIO;
            }

            fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
            memcpy(sns_data, sense_data, sense_data_length);
            fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
            io->wire_len += sense_data_length;
        }

        io->sgl[0].addr = io->rspbuf.phys;
        io->sgl[0].dif_addr = 0;
        io->sgl[0].len = io->wire_len;
        io->sgl_count = 1;
    }

    if (auto_resp)
        io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;

    return efct_scsi_io_dispatch(io, efct_target_io_cb);
}
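
/*
 * Example (illustrative sketch, not part of the driver): returning CHECK
 * CONDITION with sense data. struct efct_scsi_cmd_resp fields are the ones
 * consumed above; SAM_STAT_CHECK_CONDITION comes from the generic SCSI
 * headers, and efct_example_send_check_cond() is a hypothetical helper.
 */
static int __maybe_unused
efct_example_send_check_cond(struct efct_io *io, u8 *sense, u32 sense_len,
                 efct_scsi_io_cb_t cb, void *arg)
{
    struct efct_scsi_cmd_resp rsp;

    memset(&rsp, 0, sizeof(rsp));
    rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
    rsp.sense_data = sense;
    rsp.sense_data_length = sense_len;
    rsp.residual = 0;

    return efct_scsi_send_resp(io, 0, &rsp, cb, arg);
}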

static int
efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
            u32 ext_status, void *app)
{
    struct efct_io *io = app;
    struct efct *efct;
    enum efct_scsi_io_status bls_status;

    efct = io->efct;

    /* BLS isn't really a "SCSI" concept, but use SCSI status */
    if (status) {
        io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
        bls_status = EFCT_SCSI_STATUS_ERROR;
    } else {
        bls_status = EFCT_SCSI_STATUS_GOOD;
    }

    if (io->bls_cb) {
        efct_scsi_io_cb_t bls_cb = io->bls_cb;
        void *bls_cb_arg = io->bls_cb_arg;

        io->bls_cb = NULL;
        io->bls_cb_arg = NULL;

        /* invoke callback */
        bls_cb(io, bls_status, 0, bls_cb_arg);
    }

    efct_scsi_check_pending(efct);
    return 0;
}

static int
efct_target_send_bls_resp(struct efct_io *io,
              efct_scsi_io_cb_t cb, void *arg)
{
    struct efct_node *node = io->node;
    struct sli_bls_params *bls = &io->iparam.bls;
    struct efct *efct = node->efct;
    struct fc_ba_acc *acc;
    int rc;

    /* fill out IO structure with everything needed to send BA_ACC */
    memset(&io->iparam, 0, sizeof(io->iparam));
    bls->ox_id = io->init_task_tag;
    bls->rx_id = io->abort_rx_id;
    bls->vpi = io->node->vpi;
    bls->rpi = io->node->rpi;
    bls->s_id = U32_MAX;
    bls->d_id = io->node->node_fc_id;
    bls->rpi_registered = true;

    acc = (void *)bls->payload;
    acc->ba_ox_id = cpu_to_be16(bls->ox_id);
    acc->ba_rx_id = cpu_to_be16(bls->rx_id);
    acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

    /* generic io fields have already been populated */

    /* set type and BLS-specific fields */
    io->io_type = EFCT_IO_TYPE_BLS_RESP;
    io->display_name = "bls_rsp";
    io->hio_type = EFCT_HW_BLS_ACC;
    io->bls_cb = cb;
    io->bls_cb_arg = arg;

    /* dispatch IO */
    rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
                  efct_target_bls_resp_cb, io);
    return rc;
}

static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
                u32 ext_status, void *app)
{
    struct efct_io *io = app;

    efct_scsi_io_free(io);
    return 0;
}

struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
{
    struct efct_node *node = io->node;
    struct sli_bls_params *bls = &io->iparam.bls;
    struct efct *efct = node->efct;
    struct fc_ba_rjt *rjt;
    int rc;

    /* fill out BLS Response-specific fields */
    io->io_type = EFCT_IO_TYPE_BLS_RESP;
    io->display_name = "ba_rjt";
    io->hio_type = EFCT_HW_BLS_RJT;
    io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);

    /* fill out iparam fields */
    memset(&io->iparam, 0, sizeof(io->iparam));
    bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
    bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
    bls->vpi = io->node->vpi;
    bls->rpi = io->node->rpi;
    bls->s_id = U32_MAX;
    bls->d_id = io->node->node_fc_id;
    bls->rpi_registered = true;

    rjt = (void *)bls->payload;
    rjt->br_reason = ELS_RJT_UNAB;
    rjt->br_explan = ELS_EXPL_NONE;

    rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
                  io);
    if (rc) {
        efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
        efct_scsi_io_free(io);
        io = NULL;
    }
    return io;
}

int
efct_scsi_send_tmf_resp(struct efct_io *io,
            enum efct_scsi_tmf_resp rspcode,
            u8 addl_rsp_info[3],
            efct_scsi_io_cb_t cb, void *arg)
{
    int rc;
    struct {
        struct fcp_resp_with_ext rsp_ext;
        struct fcp_resp_rsp_info info;
    } *fcprsp;
    u8 fcp_rspcode;

    io->wire_len = 0;

    switch (rspcode) {
    case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
    case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
    case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
        fcp_rspcode = FCP_TMF_CMPL;
        break;
    case EFCT_SCSI_TMF_FUNCTION_REJECTED:
        fcp_rspcode = FCP_TMF_REJECTED;
        break;
    case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
        fcp_rspcode = FCP_TMF_INVALID_LUN;
        break;
    case EFCT_SCSI_TMF_SERVICE_DELIVERY:
        fcp_rspcode = FCP_TMF_FAILED;
        break;
    default:
        fcp_rspcode = FCP_TMF_REJECTED;
        break;
    }

    io->hio_type = EFCT_HW_IO_TARGET_RSP;

    io->scsi_tgt_cb = cb;
    io->scsi_tgt_cb_arg = arg;

    if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
        rc = efct_target_send_bls_resp(io, cb, arg);
        return rc;
    }

    /* populate the FCP TMF response */
    fcprsp = io->rspbuf.virt;
    memset(fcprsp, 0, sizeof(*fcprsp));

    /* the response carries FCP_RSP_INFO, so flag the response-length
     * field as valid
     */
    fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;

    if (addl_rsp_info) {
        memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
               sizeof(fcprsp->info._fr_resvd));
    }
    fcprsp->info.rsp_code = fcp_rspcode;

    io->wire_len = sizeof(*fcprsp);

    fcprsp->rsp_ext.ext.fr_rsp_len =
            cpu_to_be32(sizeof(struct fcp_resp_rsp_info));

    io->sgl[0].addr = io->rspbuf.phys;
    io->sgl[0].dif_addr = 0;
    io->sgl[0].len = io->wire_len;
    io->sgl_count = 1;

    memset(&io->iparam, 0, sizeof(io->iparam));
    io->iparam.fcp_tgt.ox_id = io->init_task_tag;
    io->iparam.fcp_tgt.offset = 0;
    io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
    io->iparam.fcp_tgt.timeout = io->timeout;

    rc = efct_scsi_io_dispatch(io, efct_target_io_cb);

    return rc;
}
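
/*
 * Example (illustrative sketch, not part of the driver): completing a TMF
 * other than ABORT_TASK with success; addl_rsp_info may be NULL, as handled
 * above. efct_example_tmf_done() is a hypothetical helper.
 */
static int __maybe_unused
efct_example_tmf_done(struct efct_io *tmfio, efct_scsi_io_cb_t cb, void *arg)
{
    return efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_COMPLETE,
                       NULL, cb, arg);
}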

static int
efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
             u32 ext_status, void *app)
{
    struct efct_io *io = app;
    struct efct *efct;
    enum efct_scsi_io_status scsi_status;
    efct_scsi_io_cb_t abort_cb;
    void *abort_cb_arg;

    efct = io->efct;

    if (!io->abort_cb)
        goto done;

    abort_cb = io->abort_cb;
    abort_cb_arg = io->abort_cb_arg;

    io->abort_cb = NULL;
    io->abort_cb_arg = NULL;

    switch (status) {
    case SLI4_FC_WCQE_STATUS_SUCCESS:
        scsi_status = EFCT_SCSI_STATUS_GOOD;
        break;
    case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
        switch (ext_status) {
        case SLI4_FC_LOCAL_REJECT_NO_XRI:
            scsi_status = EFCT_SCSI_STATUS_NO_IO;
            break;
        case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
            scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
            break;
        default:
            /* we have seen 0x15 (abort in progress) */
            scsi_status = EFCT_SCSI_STATUS_ERROR;
            break;
        }
        break;
    case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
        scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
        break;
    default:
        scsi_status = EFCT_SCSI_STATUS_ERROR;
        break;
    }
    /* invoke callback */
    abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);

done:
    /* done with the IO to abort; drop the reference taken in
     * efct_scsi_tgt_abort_io()
     */
    kref_put(&io->io_to_abort->ref, io->io_to_abort->release);

    efct_io_pool_io_free(efct->xport->io_pool, io);

    efct_scsi_check_pending(efct);
    return 0;
}

int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
{
    struct efct *efct;
    struct efct_xport *xport;
    int rc;
    struct efct_io *abort_io = NULL;

    efct = io->efct;
    xport = efct->xport;

    /* take a reference on the IO being aborted */
    if (!kref_get_unless_zero(&io->ref)) {
        /* command no longer active */
        scsi_io_printf(io, "command no longer active\n");
        return -EIO;
    }

    /*
     * Allocate a new IO to send the abort request. Use
     * efct_io_pool_io_alloc() directly, as we need an IO object that
     * will not fail allocation due to allocations being disabled
     * (in efct_scsi_io_alloc())
     */
    abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
    if (!abort_io) {
        atomic_add_return(1, &xport->io_alloc_failed_count);
        kref_put(&io->ref, io->release);
        return -EIO;
    }

    /* Save the target server callback and argument */
    /* set generic fields */
    abort_io->cmd_tgt = true;
    abort_io->node = io->node;

    /* set type and abort-specific fields */
    abort_io->io_type = EFCT_IO_TYPE_ABORT;
    abort_io->display_name = "tgt_abort";
    abort_io->io_to_abort = io;
    abort_io->send_abts = false;
    abort_io->abort_cb = cb;
    abort_io->abort_cb_arg = arg;

    /* now dispatch IO */
    rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
    if (rc)
        kref_put(&io->ref, io->release);
    return rc;
}
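
/*
 * Example (illustrative sketch, not part of the driver): aborting an
 * in-flight target IO. The status values seen by the callback are those
 * mapped in efct_target_abort_cb() above; the efct_example_* names are
 * hypothetical.
 */
static int __maybe_unused
efct_example_abort_done(struct efct_io *io, enum efct_scsi_io_status status,
            u32 flags, void *arg)
{
    if (status != EFCT_SCSI_STATUS_GOOD)
        scsi_io_printf(io, "abort completed, status %d\n", status);
    return 0;
}

static int __maybe_unused
efct_example_abort(struct efct_io *io)
{
    /* efct_scsi_tgt_abort_io() takes its own reference on io */
    return efct_scsi_tgt_abort_io(io, efct_example_abort_done, NULL);
}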

void
efct_scsi_io_complete(struct efct_io *io)
{
    if (io->io_free) {
        efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
                  io->tag);
        return;
    }

    scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
    kref_put(&io->ref, io->release);
}