0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  QLogic FCoE Offload Driver
0004  *  Copyright (c) 2016-2018 Cavium Inc.
0005  */
0006 #include <linux/spinlock.h>
0007 #include <linux/vmalloc.h>
0008 #include "qedf.h"
0009 #include <scsi/scsi_tcq.h>
0010 
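/*
 * Arm the per-command timeout: queue the request's timeout_work on the
 * driver's timer workqueue so that qedf_cmd_timeout() below runs after
 * timer_msec milliseconds.
 */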
0011 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
0012     unsigned int timer_msec)
0013 {
0014     queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
0015         msecs_to_jiffies(timer_msec));
0016 }
0017 
0018 static void qedf_cmd_timeout(struct work_struct *work)
0019 {
0020 
0021     struct qedf_ioreq *io_req =
0022         container_of(work, struct qedf_ioreq, timeout_work.work);
0023     struct qedf_ctx *qedf;
0024     struct qedf_rport *fcport;
0025 
0026     fcport = io_req->fcport;
0027     if (io_req->fcport == NULL) {
0028         QEDF_INFO(NULL, QEDF_LOG_IO,  "fcport is NULL.\n");
0029         return;
0030     }
0031 
0032     qedf = fcport->qedf;
0033 
0034     switch (io_req->cmd_type) {
0035     case QEDF_ABTS:
0036         if (qedf == NULL) {
0037             QEDF_INFO(NULL, QEDF_LOG_IO,
0038                   "qedf is NULL for ABTS xid=0x%x.\n",
0039                   io_req->xid);
0040             return;
0041         }
0042 
0043         QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
0044             io_req->xid);
0045         /* Cleanup timed out ABTS */
0046         qedf_initiate_cleanup(io_req, true);
0047         complete(&io_req->abts_done);
0048 
0049         /*
0050          * Need to call kref_put for reference taken when initiate_abts
0051          * was called since abts_compl won't be called now that we've
0052          * cleaned up the task.
0053          */
0054         kref_put(&io_req->refcount, qedf_release_cmd);
0055 
0056         /* Clear in abort bit now that we're done with the command */
0057         clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
0058 
0059         /*
0060          * Now that the original I/O and the ABTS are complete see
0061          * if we need to reconnect to the target.
0062          */
0063         qedf_restart_rport(fcport);
0064         break;
0065     case QEDF_ELS:
0066         if (!qedf) {
0067             QEDF_INFO(NULL, QEDF_LOG_IO,
0068                   "qedf is NULL for ELS xid=0x%x.\n",
0069                   io_req->xid);
0070             return;
0071         }
0072         /* ELS request no longer outstanding since it timed out */
0073         clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
0074 
0075         kref_get(&io_req->refcount);
0076         /*
0077          * Don't attempt to clean an ELS timeout as any subsequent
0078          * ABTS or cleanup requests just hang.  For now just free
0079          * the resources of the original I/O and the RRQ.
0080          */
0081         QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
0082               io_req->xid);
0083         qedf_initiate_cleanup(io_req, true);
0084         io_req->event = QEDF_IOREQ_EV_ELS_TMO;
0085         /* Call callback function to complete command */
0086         if (io_req->cb_func && io_req->cb_arg) {
0087             io_req->cb_func(io_req->cb_arg);
0088             io_req->cb_arg = NULL;
0089         }
0090         kref_put(&io_req->refcount, qedf_release_cmd);
0091         break;
0092     case QEDF_SEQ_CLEANUP:
0093         QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
0094             "xid=0x%x.\n", io_req->xid);
0095         qedf_initiate_cleanup(io_req, true);
0096         io_req->event = QEDF_IOREQ_EV_ELS_TMO;
0097         qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
0098         break;
0099     default:
0100         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
0101               "Hit default case, xid=0x%x.\n", io_req->xid);
0102         break;
0103     }
0104 }
0105 
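/*
 * Tear down everything qedf_cmd_mgr_alloc() set up: the per-command BD
 * tables, the io_bdt pool, the per-command sense buffers and task/SGL
 * parameter blocks, and finally the command manager itself.  Also used
 * as the unwind path when qedf_cmd_mgr_alloc() fails part way through.
 */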
0106 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
0107 {
0108     struct io_bdt *bdt_info;
0109     struct qedf_ctx *qedf = cmgr->qedf;
0110     size_t bd_tbl_sz;
0111     u16 min_xid = 0;
0112     u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
0113     int num_ios;
0114     int i;
0115     struct qedf_ioreq *io_req;
0116 
0117     num_ios = max_xid - min_xid + 1;
0118 
0119     /* Free fcoe_bdt_ctx structures */
0120     if (!cmgr->io_bdt_pool) {
0121         QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
0122         goto free_cmd_pool;
0123     }
0124 
0125     bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
0126     for (i = 0; i < num_ios; i++) {
0127         bdt_info = cmgr->io_bdt_pool[i];
0128         if (bdt_info && bdt_info->bd_tbl) {
0129             dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
0130                 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
0131             bdt_info->bd_tbl = NULL;
0132         }
0133     }
0134 
0135     /* Destroy io_bdt pool */
0136     for (i = 0; i < num_ios; i++) {
0137         kfree(cmgr->io_bdt_pool[i]);
0138         cmgr->io_bdt_pool[i] = NULL;
0139     }
0140 
0141     kfree(cmgr->io_bdt_pool);
0142     cmgr->io_bdt_pool = NULL;
0143 
0144 free_cmd_pool:
0145 
0146     for (i = 0; i < num_ios; i++) {
0147         io_req = &cmgr->cmds[i];
0148         kfree(io_req->sgl_task_params);
0149         kfree(io_req->task_params);
0150         /* Make sure we free per command sense buffer */
0151         if (io_req->sense_buffer)
0152             dma_free_coherent(&qedf->pdev->dev,
0153                 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
0154                 io_req->sense_buffer_dma);
0155         cancel_delayed_work_sync(&io_req->rrq_work);
0156     }
0157 
0158     /* Free command manager itself */
0159     vfree(cmgr);
0160 }
0161 
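/*
 * Delayed-work handler that marks the request RRQ-active and sends an
 * RRQ (Reinstate Recovery Qualifier) ELS for its exchange.
 */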
0162 static void qedf_handle_rrq(struct work_struct *work)
0163 {
0164     struct qedf_ioreq *io_req =
0165         container_of(work, struct qedf_ioreq, rrq_work.work);
0166 
0167     atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
0168     qedf_send_rrq(io_req);
0169 
0170 }
0171 
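/*
 * Allocate and initialize the command manager: one qedf_ioreq per task
 * (xid 0..FCOE_PARAMS_NUM_TASKS - 1), each with a DMA-coherent sense
 * buffer plus firmware task and SGL parameter blocks, and an io_bdt pool
 * with a DMA-coherent BD table per command.  Any allocation failure
 * unwinds through qedf_cmd_mgr_free().
 */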
0172 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
0173 {
0174     struct qedf_cmd_mgr *cmgr;
0175     struct io_bdt *bdt_info;
0176     struct qedf_ioreq *io_req;
0177     u16 xid;
0178     int i;
0179     int num_ios;
0180     u16 min_xid = 0;
0181     u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
0182 
0183     /* Make sure num_queues is already set before calling this function */
0184     if (!qedf->num_queues) {
0185         QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
0186         return NULL;
0187     }
0188 
0189     if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
0190         QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
0191                "max_xid 0x%x.\n", min_xid, max_xid);
0192         return NULL;
0193     }
0194 
0195     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
0196            "0x%x.\n", min_xid, max_xid);
0197 
0198     num_ios = max_xid - min_xid + 1;
0199 
0200     cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
0201     if (!cmgr) {
0202         QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
0203         return NULL;
0204     }
0205 
0206     cmgr->qedf = qedf;
0207     spin_lock_init(&cmgr->lock);
0208 
0209     /*
0210      * Initialize I/O request fields.
0211      */
0212     xid = 0;
0213 
0214     for (i = 0; i < num_ios; i++) {
0215         io_req = &cmgr->cmds[i];
0216         INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
0217 
0218         io_req->xid = xid++;
0219 
0220         INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
0221 
0222         /* Allocate DMA memory to hold sense buffer */
0223         io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
0224             QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
0225             GFP_KERNEL);
0226         if (!io_req->sense_buffer) {
0227             QEDF_ERR(&qedf->dbg_ctx,
0228                  "Failed to alloc sense buffer.\n");
0229             goto mem_err;
0230         }
0231 
0232         /* Allocate task parameters to pass to f/w init functions */
0233         io_req->task_params = kzalloc(sizeof(*io_req->task_params),
0234                           GFP_KERNEL);
0235         if (!io_req->task_params) {
0236             QEDF_ERR(&(qedf->dbg_ctx),
0237                  "Failed to allocate task_params for xid=0x%x\n",
0238                  i);
0239             goto mem_err;
0240         }
0241 
0242         /*
0243          * Allocate scatter/gather list info to pass to f/w init
0244          * functions.
0245          */
0246         io_req->sgl_task_params = kzalloc(
0247             sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
0248         if (!io_req->sgl_task_params) {
0249             QEDF_ERR(&(qedf->dbg_ctx),
0250                  "Failed to allocate sgl_task_params for xid=0x%x\n",
0251                  i);
0252             goto mem_err;
0253         }
0254     }
0255 
0256     /* Allocate pool of io_bdts - one for each qedf_ioreq */
0257     cmgr->io_bdt_pool = kcalloc(num_ios, sizeof(struct io_bdt *),
0258         GFP_KERNEL);
0259 
0260     if (!cmgr->io_bdt_pool) {
0261         QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
0262         goto mem_err;
0263     }
0264 
0265     for (i = 0; i < num_ios; i++) {
0266         cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
0267             GFP_KERNEL);
0268         if (!cmgr->io_bdt_pool[i]) {
0269             QEDF_WARN(&(qedf->dbg_ctx),
0270                   "Failed to alloc io_bdt_pool[%d].\n", i);
0271             goto mem_err;
0272         }
0273     }
0274 
0275     for (i = 0; i < num_ios; i++) {
0276         bdt_info = cmgr->io_bdt_pool[i];
0277         bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
0278             QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
0279             &bdt_info->bd_tbl_dma, GFP_KERNEL);
0280         if (!bdt_info->bd_tbl) {
0281             QEDF_WARN(&(qedf->dbg_ctx),
0282                   "Failed to alloc bdt_tbl[%d].\n", i);
0283             goto mem_err;
0284         }
0285     }
0286     atomic_set(&cmgr->free_list_cnt, num_ios);
0287     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
0288         "cmgr->free_list_cnt=%d.\n",
0289         atomic_read(&cmgr->free_list_cnt));
0290 
0291     return cmgr;
0292 
0293 mem_err:
0294     qedf_cmd_mgr_free(cmgr);
0295     return NULL;
0296 }
0297 
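/*
 * Allocate a free qedf_ioreq for this rport.  Allocation is throttled by
 * the rport's free SQEs, the per-connection active I/O limit, and a
 * global reserve of task IDs; the free command is found by a round-robin
 * scan of the command array under cmd_mgr->lock.  The returned request
 * holds an initial kref (ID: 001) that the caller must eventually drop
 * via qedf_release_cmd().
 */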
0298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
0299 {
0300     struct qedf_ctx *qedf = fcport->qedf;
0301     struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
0302     struct qedf_ioreq *io_req = NULL;
0303     struct io_bdt *bd_tbl;
0304     u16 xid;
0305     uint32_t free_sqes;
0306     int i;
0307     unsigned long flags;
0308 
0309     free_sqes = atomic_read(&fcport->free_sqes);
0310 
0311     if (!free_sqes) {
0312         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
0313             "Returning NULL, free_sqes=%d.\n ",
0314             free_sqes);
0315         goto out_failed;
0316     }
0317 
0318     /* Limit the number of outstanding R/W tasks */
0319     if ((atomic_read(&fcport->num_active_ios) >=
0320         NUM_RW_TASKS_PER_CONNECTION)) {
0321         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
0322             "Returning NULL, num_active_ios=%d.\n",
0323             atomic_read(&fcport->num_active_ios));
0324         goto out_failed;
0325     }
0326 
0327     /* Keep some global TIDs in reserve for certain tasks */
0328     if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
0329         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
0330             "Returning NULL, free_list_cnt=%d.\n",
0331             atomic_read(&cmd_mgr->free_list_cnt));
0332         goto out_failed;
0333     }
0334 
0335     spin_lock_irqsave(&cmd_mgr->lock, flags);
0336     for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
0337         io_req = &cmd_mgr->cmds[cmd_mgr->idx];
0338         cmd_mgr->idx++;
0339         if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
0340             cmd_mgr->idx = 0;
0341 
0342         /* Check to make sure command was previously freed */
0343         if (!io_req->alloc)
0344             break;
0345     }
0346 
0347     if (i == FCOE_PARAMS_NUM_TASKS) {
0348         spin_unlock_irqrestore(&cmd_mgr->lock, flags);
0349         goto out_failed;
0350     }
0351 
0352     if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
0353         QEDF_ERR(&qedf->dbg_ctx,
0354              "io_req found to be dirty ox_id = 0x%x.\n",
0355              io_req->xid);
0356 
0357     /* Clear any flags now that we've reallocated the xid */
0358     io_req->flags = 0;
0359     io_req->alloc = 1;
0360     spin_unlock_irqrestore(&cmd_mgr->lock, flags);
0361 
0362     atomic_inc(&fcport->num_active_ios);
0363     atomic_dec(&fcport->free_sqes);
0364     xid = io_req->xid;
0365     atomic_dec(&cmd_mgr->free_list_cnt);
0366 
0367     io_req->cmd_mgr = cmd_mgr;
0368     io_req->fcport = fcport;
0369 
0370     /* Clear any stale sc_cmd back pointer */
0371     io_req->sc_cmd = NULL;
0372     io_req->lun = -1;
0373 
0374     /* Hold the io_req against deletion */
0375     kref_init(&io_req->refcount);   /* ID: 001 */
0376     atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
0377 
0378     /* Bind io_bdt for this io_req */
0379     /* Have a static link between io_req and io_bdt_pool */
0380     bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
0381     if (bd_tbl == NULL) {
0382         QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
0383         kref_put(&io_req->refcount, qedf_release_cmd);
0384         goto out_failed;
0385     }
0386     bd_tbl->io_req = io_req;
0387     io_req->cmd_type = cmd_type;
0388     io_req->tm_flags = 0;
0389 
0390     /* Reset sequence offset data */
0391     io_req->rx_buf_off = 0;
0392     io_req->tx_buf_off = 0;
0393     io_req->rx_id = 0xffff; /* No RX_ID assigned yet */
0394 
0395     return io_req;
0396 
0397 out_failed:
0398     /* Record failure for stats and return NULL to caller */
0399     qedf->alloc_failures++;
0400     return NULL;
0401 }
0402 
0403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
0404 {
0405     struct qedf_mp_req *mp_req = &(io_req->mp_req);
0406     struct qedf_ctx *qedf = io_req->fcport->qedf;
0407     uint64_t sz = sizeof(struct scsi_sge);
0408 
0409     /* Free any middle-path DMA buffers and BD tables for this request */
0410     if (mp_req->mp_req_bd) {
0411         dma_free_coherent(&qedf->pdev->dev, sz,
0412             mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
0413         mp_req->mp_req_bd = NULL;
0414     }
0415     if (mp_req->mp_resp_bd) {
0416         dma_free_coherent(&qedf->pdev->dev, sz,
0417             mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
0418         mp_req->mp_resp_bd = NULL;
0419     }
0420     if (mp_req->req_buf) {
0421         dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
0422             mp_req->req_buf, mp_req->req_buf_dma);
0423         mp_req->req_buf = NULL;
0424     }
0425     if (mp_req->resp_buf) {
0426         dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
0427             mp_req->resp_buf, mp_req->resp_buf_dma);
0428         mp_req->resp_buf = NULL;
0429     }
0430 }
0431 
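/*
 * kref release callback for a qedf_ioreq: frees middle-path resources for
 * ELS/TMF commands, returns the command to the manager's free accounting,
 * bumps the task retry identifier, and clears the alloc flag under
 * cmd_mgr->lock so qedf_alloc_cmd() can hand the slot out again.
 */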
0432 void qedf_release_cmd(struct kref *ref)
0433 {
0434     struct qedf_ioreq *io_req =
0435         container_of(ref, struct qedf_ioreq, refcount);
0436     struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
0437     struct qedf_rport *fcport = io_req->fcport;
0438     unsigned long flags;
0439 
0440     if (io_req->cmd_type == QEDF_SCSI_CMD) {
0441         QEDF_WARN(&fcport->qedf->dbg_ctx,
0442               "Cmd release called without scsi_done having been called, io_req %p xid=0x%x.\n",
0443               io_req, io_req->xid);
0444         WARN_ON(io_req->sc_cmd);
0445     }
0446 
0447     if (io_req->cmd_type == QEDF_ELS ||
0448         io_req->cmd_type == QEDF_TASK_MGMT_CMD)
0449         qedf_free_mp_resc(io_req);
0450 
0451     atomic_inc(&cmd_mgr->free_list_cnt);
0452     atomic_dec(&fcport->num_active_ios);
0453     atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
0454     if (atomic_read(&fcport->num_active_ios) < 0) {
0455         QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
0456         WARN_ON(1);
0457     }
0458 
0459     /* Increment task retry identifier now that the request is released */
0460     io_req->task_retry_identifier++;
0461     io_req->fcport = NULL;
0462 
0463     clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
0464     io_req->cpu = 0;
0465     spin_lock_irqsave(&cmd_mgr->lock, flags);
0466     io_req->fcport = NULL;
0467     io_req->alloc = 0;
0468     spin_unlock_irqrestore(&cmd_mgr->lock, flags);
0469 }
0470 
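/*
 * DMA-map the scsi_cmnd scatterlist and fill the hardware BD table.
 * The request is classified as a fast SGE unless it is a large write
 * (more than 8 elements) with a middle element shorter than a page, in
 * which case the slow SGE path is used.
 */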
0471 static int qedf_map_sg(struct qedf_ioreq *io_req)
0472 {
0473     struct scsi_cmnd *sc = io_req->sc_cmd;
0474     struct Scsi_Host *host = sc->device->host;
0475     struct fc_lport *lport = shost_priv(host);
0476     struct qedf_ctx *qedf = lport_priv(lport);
0477     struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
0478     struct scatterlist *sg;
0479     int byte_count = 0;
0480     int sg_count = 0;
0481     int bd_count = 0;
0482     u32 sg_len;
0483     u64 addr;
0484     int i = 0;
0485 
0486     sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
0487         scsi_sg_count(sc), sc->sc_data_direction);
0488     sg = scsi_sglist(sc);
0489 
0490     io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
0491 
0492     if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
0493         io_req->sge_type = QEDF_IOREQ_FAST_SGE;
0494 
0495     scsi_for_each_sg(sc, sg, sg_count, i) {
0496         sg_len = (u32)sg_dma_len(sg);
0497         addr = (u64)sg_dma_address(sg);
0498 
0499         /*
0500          * Intermediate s/g element, so check whether it is shorter than
0501          * a page.  Only required for writes and only when there are more
0502          * than 8 scatter/gather elements.
0503          */
0504         if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
0505             (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
0506             io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
0507 
0508         bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
0509         bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
0510         bd[bd_count].sge_len = cpu_to_le32(sg_len);
0511 
0512         bd_count++;
0513         byte_count += sg_len;
0514     }
0515 
0516     /* If neither FAST nor SLOW was selected above, default to FAST */
0517     if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
0518         io_req->sge_type = QEDF_IOREQ_FAST_SGE;
0519 
0520     if (byte_count != scsi_bufflen(sc))
0521         QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
0522               "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
0523                scsi_bufflen(sc), io_req->xid);
0524 
0525     return bd_count;
0526 }
0527 
0528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
0529 {
0530     struct scsi_cmnd *sc = io_req->sc_cmd;
0531     struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
0532     int bd_count;
0533 
0534     if (scsi_sg_count(sc)) {
0535         bd_count = qedf_map_sg(io_req);
0536         if (bd_count == 0)
0537             return -ENOMEM;
0538     } else {
0539         bd_count = 0;
0540         bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
0541         bd[0].sge_len = 0;
0542     }
0543     io_req->bd_tbl->bd_valid = bd_count;
0544 
0545     return 0;
0546 }
0547 
0548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
0549                   struct fcp_cmnd *fcp_cmnd)
0550 {
0551     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
0552 
0553     /* fcp_cmnd is 32 bytes */
0554     memset(fcp_cmnd, 0, FCP_CMND_LEN);
0555 
0556     /* 8 bytes: SCSI LUN info */
0557     int_to_scsilun(sc_cmd->device->lun,
0558             (struct scsi_lun *)&fcp_cmnd->fc_lun);
0559 
0560     /* 4 bytes: flag info */
0561     fcp_cmnd->fc_pri_ta = 0;
0562     fcp_cmnd->fc_tm_flags = io_req->tm_flags;
0563     fcp_cmnd->fc_flags = io_req->io_req_flags;
0564     fcp_cmnd->fc_cmdref = 0;
0565 
0566     /* Populate data direction */
0567     if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
0568         fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
0569     } else {
0570         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
0571             fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
0572         else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
0573             fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
0574     }
0575 
0576     fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
0577 
0578     /* 16 bytes: CDB information */
0579     if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
0580         memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
0581 
0582     /* 4 bytes: FCP data length */
0583     fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
0584 }
0585 
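/*
 * Build the firmware task context for a regular read/write command:
 * fill fcoe_task_params and scsi_sgl_task_params, pick the completion
 * queue based on the current CPU, byte-swap the FCP_CMND IU to big
 * endian, and hand everything to init_initiator_rw_fcoe_task().
 */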
0586 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
0587     struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
0588     struct fcoe_wqe *sqe)
0589 {
0590     enum fcoe_task_type task_type;
0591     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
0592     struct io_bdt *bd_tbl = io_req->bd_tbl;
0593     u8 fcp_cmnd[32];
0594     u32 tmp_fcp_cmnd[8];
0595     int bd_count = 0;
0596     struct qedf_ctx *qedf = fcport->qedf;
0597     uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
0598     struct regpair sense_data_buffer_phys_addr;
0599     u32 tx_io_size = 0;
0600     u32 rx_io_size = 0;
0601     int i, cnt;
0602 
0603     /* Note init_initiator_rw_fcoe_task memsets the task context */
0604     io_req->task = task_ctx;
0605     memset(task_ctx, 0, sizeof(struct fcoe_task_context));
0606     memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
0607     memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
0608 
0609     /* Set task type based on the DMA direction of the command */
0610     if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
0611         task_type = FCOE_TASK_TYPE_READ_INITIATOR;
0612     } else {
0613         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
0614             task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
0615             tx_io_size = io_req->data_xfer_len;
0616         } else {
0617             task_type = FCOE_TASK_TYPE_READ_INITIATOR;
0618             rx_io_size = io_req->data_xfer_len;
0619         }
0620     }
0621 
0622     /* Setup the fields for fcoe_task_params */
0623     io_req->task_params->context = task_ctx;
0624     io_req->task_params->sqe = sqe;
0625     io_req->task_params->task_type = task_type;
0626     io_req->task_params->tx_io_size = tx_io_size;
0627     io_req->task_params->rx_io_size = rx_io_size;
0628     io_req->task_params->conn_cid = fcport->fw_cid;
0629     io_req->task_params->itid = io_req->xid;
0630     io_req->task_params->cq_rss_number = cq_idx;
0631     io_req->task_params->is_tape_device = fcport->dev_type;
0632 
0633     /* Fill in information for scatter/gather list */
0634     if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
0635         bd_count = bd_tbl->bd_valid;
0636         io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
0637         io_req->sgl_task_params->sgl_phys_addr.lo =
0638             U64_LO(bd_tbl->bd_tbl_dma);
0639         io_req->sgl_task_params->sgl_phys_addr.hi =
0640             U64_HI(bd_tbl->bd_tbl_dma);
0641         io_req->sgl_task_params->num_sges = bd_count;
0642         io_req->sgl_task_params->total_buffer_size =
0643             scsi_bufflen(io_req->sc_cmd);
0644         if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
0645             io_req->sgl_task_params->small_mid_sge = 1;
0646         else
0647             io_req->sgl_task_params->small_mid_sge = 0;
0648     }
0649 
0650     /* Fill in physical address of sense buffer */
0651     sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
0652     sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
0653 
0654     /* fill FCP_CMND IU */
0655     qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
0656 
0657     /* Swap fcp_cmnd since FC is big endian */
0658     cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
0659     for (i = 0; i < cnt; i++) {
0660         tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
0661     }
0662     memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
0663 
0664     init_initiator_rw_fcoe_task(io_req->task_params,
0665                     io_req->sgl_task_params,
0666                     sense_data_buffer_phys_addr,
0667                     io_req->task_retry_identifier, fcp_cmnd);
0668 
0669     /* Increment SGL type counters */
0670     if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
0671         qedf->slow_sge_ios++;
0672     else
0673         qedf->fast_sge_ios++;
0674 }
0675 
0676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
0677     struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
0678 {
0679     struct qedf_mp_req *mp_req = &(io_req->mp_req);
0680     struct qedf_rport *fcport = io_req->fcport;
0681     struct qedf_ctx *qedf = io_req->fcport->qedf;
0682     struct fc_frame_header *fc_hdr;
0683     struct fcoe_tx_mid_path_params task_fc_hdr;
0684     struct scsi_sgl_task_params tx_sgl_task_params;
0685     struct scsi_sgl_task_params rx_sgl_task_params;
0686 
0687     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
0688           "Initializing MP task for cmd_type=%d\n",
0689           io_req->cmd_type);
0690 
0691     qedf->control_requests++;
0692 
0693     memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
0694     memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
0695     memset(task_ctx, 0, sizeof(struct fcoe_task_context));
0696     memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
0697 
0698     /* Setup the task from io_req for easy reference */
0699     io_req->task = task_ctx;
0700 
0701     /* Setup the fields for fcoe_task_params */
0702     io_req->task_params->context = task_ctx;
0703     io_req->task_params->sqe = sqe;
0704     io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
0705     io_req->task_params->tx_io_size = io_req->data_xfer_len;
0706     /* rx_io_size tells the f/w how large a response buffer we have */
0707     io_req->task_params->rx_io_size = PAGE_SIZE;
0708     io_req->task_params->conn_cid = fcport->fw_cid;
0709     io_req->task_params->itid = io_req->xid;
0710     /* Return middle path commands on CQ 0 */
0711     io_req->task_params->cq_rss_number = 0;
0712     io_req->task_params->is_tape_device = fcport->dev_type;
0713 
0714     fc_hdr = &(mp_req->req_fc_hdr);
0715     /* Set OX_ID and RX_ID based on driver task id */
0716     fc_hdr->fh_ox_id = io_req->xid;
0717     fc_hdr->fh_rx_id = htons(0xffff);
0718 
0719     /* Set up FC header information */
0720     task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
0721     task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
0722     task_fc_hdr.type = fc_hdr->fh_type;
0723     task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
0724     task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
0725     task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
0726     task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
0727 
0728     /* Set up s/g list parameters for request buffer */
0729     tx_sgl_task_params.sgl = mp_req->mp_req_bd;
0730     tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
0731     tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
0732     tx_sgl_task_params.num_sges = 1;
0733     /* The request buffer carries the full transfer length */
0734     tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
0735     tx_sgl_task_params.small_mid_sge = 0;
0736 
0737     /* Set up s/g list parameters for the response buffer */
0738     rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
0739     rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
0740     rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
0741     rx_sgl_task_params.num_sges = 1;
0742     /* The response buffer is a single page, so cap the size at PAGE_SIZE */
0743     rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
0744     rx_sgl_task_params.small_mid_sge = 0;
0745 
0746 
0747     /*
0748      * The last argument is 0 because the previous code never requested
0749      * the FC header information.
0750      */
0751     init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
0752                              &task_fc_hdr,
0753                              &tx_sgl_task_params,
0754                              &rx_sgl_task_params, 0);
0755 }
0756 
0757 /* Presumed that fcport->rport_lock is held */
0758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
0759 {
0760     uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
0761     u16 rval;
0762 
0763     rval = fcport->sq_prod_idx;
0764 
0765     /* Adjust ring index */
0766     fcport->sq_prod_idx++;
0767     fcport->fw_sq_prod_idx++;
0768     if (fcport->sq_prod_idx == total_sqe)
0769         fcport->sq_prod_idx = 0;
0770 
0771     return rval;
0772 }
0773 
0774 void qedf_ring_doorbell(struct qedf_rport *fcport)
0775 {
0776     struct fcoe_db_data dbell = { 0 };
0777 
0778     dbell.agg_flags = 0;
0779 
0780     dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
0781     dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
0782     dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
0783         FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
0784 
0785     dbell.sq_prod = fcport->fw_sq_prod_idx;
0786     /* wmb makes sure that the BDs data is updated before updating the
0787      * producer, otherwise FW may read old data from the BDs.
0788      */
0789     wmb();
0790     barrier();
0791     writel(*(u32 *)&dbell, fcport->p_doorbell);
0792     /*
0793      * Fence required to flush the write combined buffer, since another
0794      * CPU may write to the same doorbell address and data may be lost
0795      * due to relaxed order nature of write combined bar.
0796      */
0797     wmb();
0798 }
0799 
0800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
0801               int8_t direction)
0802 {
0803     struct qedf_ctx *qedf = fcport->qedf;
0804     struct qedf_io_log *io_log;
0805     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
0806     unsigned long flags;
0807 
0808     spin_lock_irqsave(&qedf->io_trace_lock, flags);
0809 
0810     io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
0811     io_log->direction = direction;
0812     io_log->task_id = io_req->xid;
0813     io_log->port_id = fcport->rdata->ids.port_id;
0814     io_log->lun = sc_cmd->device->lun;
0815     io_log->op = sc_cmd->cmnd[0];
0816     io_log->lba[0] = sc_cmd->cmnd[2];
0817     io_log->lba[1] = sc_cmd->cmnd[3];
0818     io_log->lba[2] = sc_cmd->cmnd[4];
0819     io_log->lba[3] = sc_cmd->cmnd[5];
0820     io_log->bufflen = scsi_bufflen(sc_cmd);
0821     io_log->sg_count = scsi_sg_count(sc_cmd);
0822     io_log->result = sc_cmd->result;
0823     io_log->jiffies = jiffies;
0824     io_log->refcount = kref_read(&io_req->refcount);
0825 
0826     if (direction == QEDF_IO_TRACE_REQ) {
0827         /* For requests we only care about the submission CPU */
0828         io_log->req_cpu = io_req->cpu;
0829         io_log->int_cpu = 0;
0830         io_log->rsp_cpu = 0;
0831     } else if (direction == QEDF_IO_TRACE_RSP) {
0832         io_log->req_cpu = io_req->cpu;
0833         io_log->int_cpu = io_req->int_cpu;
0834         io_log->rsp_cpu = smp_processor_id();
0835     }
0836 
0837     io_log->sge_type = io_req->sge_type;
0838 
0839     qedf->io_trace_idx++;
0840     if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
0841         qedf->io_trace_idx = 0;
0842 
0843     spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
0844 }
0845 
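/*
 * Post a SCSI command to the firmware: build the BD list, take an SQE
 * from the rport's send queue, initialize the task context, and ring the
 * doorbell.  Called with fcport->rport_lock held (see qedf_queuecommand);
 * a non-zero return means the SQE must be returned to the pool.
 */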
0846 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
0847 {
0848     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
0849     struct Scsi_Host *host = sc_cmd->device->host;
0850     struct fc_lport *lport = shost_priv(host);
0851     struct qedf_ctx *qedf = lport_priv(lport);
0852     struct fcoe_task_context *task_ctx;
0853     u16 xid;
0854     struct fcoe_wqe *sqe;
0855     u16 sqe_idx;
0856 
0857     /* Initialize the rest of the io_req fields */
0858     io_req->data_xfer_len = scsi_bufflen(sc_cmd);
0859     qedf_priv(sc_cmd)->io_req = io_req;
0860     io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
0861 
0862     /* Record which cpu this request is associated with */
0863     io_req->cpu = smp_processor_id();
0864 
0865     if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
0866         io_req->io_req_flags = QEDF_READ;
0867         qedf->input_requests++;
0868     } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
0869         io_req->io_req_flags = QEDF_WRITE;
0870         qedf->output_requests++;
0871     } else {
0872         io_req->io_req_flags = 0;
0873         qedf->control_requests++;
0874     }
0875 
0876     xid = io_req->xid;
0877 
0878     /* Build buffer descriptor list for firmware from sg list */
0879     if (qedf_build_bd_list_from_sg(io_req)) {
0880         QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
0881         /* Detach sc_cmd before dropping the reference that releases io_req */
0882         io_req->sc_cmd = NULL;
0883         kref_put(&io_req->refcount, qedf_release_cmd);
0884         return -EAGAIN;
0885     }
0886 
0887     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
0888         test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
0889         QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
0890         /* Detach sc_cmd before dropping the reference that releases io_req */
0891         io_req->sc_cmd = NULL;
0892         kref_put(&io_req->refcount, qedf_release_cmd);
0893         return -EINVAL;
0894     }
0895 
0896     /* Record the LUN number for later use if we need it */
0897     io_req->lun = (int)sc_cmd->device->lun;
0898 
0899     /* Obtain free SQE */
0900     sqe_idx = qedf_get_sqe_idx(fcport);
0901     sqe = &fcport->sq[sqe_idx];
0902     memset(sqe, 0, sizeof(struct fcoe_wqe));
0903 
0904     /* Get the task context */
0905     task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
0906     if (!task_ctx) {
0907         QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
0908                xid);
0909         /* Detach sc_cmd before dropping the reference that releases io_req */
0910         io_req->sc_cmd = NULL;
0911         kref_put(&io_req->refcount, qedf_release_cmd);
0912         return -EINVAL;
0913     }
0914 
0915     qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
0916 
0917     /* Ring doorbell */
0918     qedf_ring_doorbell(fcport);
0919 
0920     /* Set that command is with the firmware now */
0921     set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
0922 
0923     if (qedf_io_tracing && io_req->sc_cmd)
0924         qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
0925 
0926     return 0;
0927 }
0928 
0929 int
0930 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
0931 {
0932     struct fc_lport *lport = shost_priv(host);
0933     struct qedf_ctx *qedf = lport_priv(lport);
0934     struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
0935     struct fc_rport_libfc_priv *rp = rport->dd_data;
0936     struct qedf_rport *fcport;
0937     struct qedf_ioreq *io_req;
0938     int rc = 0;
0939     int rval;
0940     unsigned long flags = 0;
0941     int num_sgs = 0;
0942 
0943     num_sgs = scsi_sg_count(sc_cmd);
0944     if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
0945         QEDF_ERR(&qedf->dbg_ctx,
0946              "Number of SG elements %d exceeds the hardware limitation of %d.\n",
0947              num_sgs, QEDF_MAX_BDS_PER_CMD);
0948         sc_cmd->result = DID_ERROR << 16;
0949         scsi_done(sc_cmd);
0950         return 0;
0951     }
0952 
0953     if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
0954         test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
0955         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
0956               "Returning DID_NO_CONNECT as we are unloading or stopping I/O, flags 0x%lx.\n",
0957               qedf->flags);
0958         sc_cmd->result = DID_NO_CONNECT << 16;
0959         scsi_done(sc_cmd);
0960         return 0;
0961     }
0962 
0963     if (!qedf->pdev->msix_enabled) {
0964         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
0965             "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
0966             sc_cmd);
0967         sc_cmd->result = DID_NO_CONNECT << 16;
0968         scsi_done(sc_cmd);
0969         return 0;
0970     }
0971 
0972     rval = fc_remote_port_chkready(rport);
0973     if (rval) {
0974         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
0975               "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
0976               rval, rport->port_id);
0977         sc_cmd->result = rval;
0978         scsi_done(sc_cmd);
0979         return 0;
0980     }
0981 
0982     /* Retry command if we are doing a qed drain operation */
0983     if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
0984         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
0985         rc = SCSI_MLQUEUE_HOST_BUSY;
0986         goto exit_qcmd;
0987     }
0988 
0989     if (lport->state != LPORT_ST_READY ||
0990         atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
0991         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
0992         rc = SCSI_MLQUEUE_HOST_BUSY;
0993         goto exit_qcmd;
0994     }
0995 
0996     /* rport and fcport are allocated together, so fcport should be non-NULL */
0997     fcport = (struct qedf_rport *)&rp[1];
0998 
0999     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1000         test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1001         /*
1002          * Session is not offloaded yet. Let SCSI-ml retry
1003          * the command.
1004          */
1005         rc = SCSI_MLQUEUE_TARGET_BUSY;
1006         goto exit_qcmd;
1007     }
1008 
1009     atomic_inc(&fcport->ios_to_queue);
1010 
1011     if (fcport->retry_delay_timestamp) {
1012         /* Take fcport->rport_lock for resetting the delay_timestamp */
1013         spin_lock_irqsave(&fcport->rport_lock, flags);
1014         if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1015             fcport->retry_delay_timestamp = 0;
1016         } else {
1017             spin_unlock_irqrestore(&fcport->rport_lock, flags);
1018             /* If retry_delay timer is active, flow off the ML */
1019             rc = SCSI_MLQUEUE_TARGET_BUSY;
1020             atomic_dec(&fcport->ios_to_queue);
1021             goto exit_qcmd;
1022         }
1023         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1024     }
1025 
1026     io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1027     if (!io_req) {
1028         rc = SCSI_MLQUEUE_HOST_BUSY;
1029         atomic_dec(&fcport->ios_to_queue);
1030         goto exit_qcmd;
1031     }
1032 
1033     io_req->sc_cmd = sc_cmd;
1034 
1035     /* Take fcport->rport_lock for posting to fcport send queue */
1036     spin_lock_irqsave(&fcport->rport_lock, flags);
1037     if (qedf_post_io_req(fcport, io_req)) {
1038         QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1039         /* Return SQE to pool */
1040         atomic_inc(&fcport->free_sqes);
1041         rc = SCSI_MLQUEUE_HOST_BUSY;
1042     }
1043     spin_unlock_irqrestore(&fcport->rport_lock, flags);
1044     atomic_dec(&fcport->ios_to_queue);
1045 
1046 exit_qcmd:
1047     return rc;
1048 }
1049 
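/*
 * Decode the FCP_RSP IU carried in the CQE: residual count, SCSI status,
 * the optional response-info field (the FCP response code used for task
 * management), and any sense data, which is copied into the midlayer's
 * sense buffer.
 */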
1050 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1051                  struct fcoe_cqe_rsp_info *fcp_rsp)
1052 {
1053     struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1054     struct qedf_ctx *qedf = io_req->fcport->qedf;
1055     u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1056     int fcp_sns_len = 0;
1057     int fcp_rsp_len = 0;
1058     uint8_t *rsp_info, *sense_data;
1059 
1060     io_req->fcp_status = FC_GOOD;
1061     io_req->fcp_resid = 0;
1062     if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1063         FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1064         io_req->fcp_resid = fcp_rsp->fcp_resid;
1065 
1066     io_req->scsi_comp_flags = rsp_flags;
1067     io_req->cdb_status = fcp_rsp->scsi_status_code;
1068 
1069     if (rsp_flags &
1070         FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1071         fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1072 
1073     if (rsp_flags &
1074         FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1075         fcp_sns_len = fcp_rsp->fcp_sns_len;
1076 
1077     io_req->fcp_rsp_len = fcp_rsp_len;
1078     io_req->fcp_sns_len = fcp_sns_len;
1079     rsp_info = sense_data = io_req->sense_buffer;
1080 
1081     /* fetch fcp_rsp_code */
1082     if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1083         /* Only for task management function */
1084         io_req->fcp_rsp_code = rsp_info[3];
1085         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1086             "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1087         /* Adjust sense-data location. */
1088         sense_data += fcp_rsp_len;
1089     }
1090 
1091     if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1092         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1093             "Truncating sense buffer\n");
1094         fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1095     }
1096 
1097     /* The sense buffer can be NULL for TMF commands */
1098     if (sc_cmd->sense_buffer) {
1099         memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1100         if (fcp_sns_len)
1101             memcpy(sc_cmd->sense_buffer, sense_data,
1102                 fcp_sns_len);
1103     }
1104 }
1105 
1106 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1107 {
1108     struct scsi_cmnd *sc = io_req->sc_cmd;
1109 
1110     if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1111         dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1112             scsi_sg_count(sc), sc->sc_data_direction);
1113         io_req->bd_tbl->bd_valid = 0;
1114     }
1115 }
1116 
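/*
 * Normal I/O completion from a CQE: validate the command, parse the FCP
 * response, handle firmware-detected underruns and the SAM BUSY /
 * TASK SET FULL retry-delay qualifier, then complete the command to the
 * SCSI midlayer and drop the allocation reference.
 */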
1117 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1118     struct qedf_ioreq *io_req)
1119 {
1120     struct scsi_cmnd *sc_cmd;
1121     struct fcoe_cqe_rsp_info *fcp_rsp;
1122     struct qedf_rport *fcport;
1123     int refcount;
1124     u16 scope, qualifier = 0;
1125     u8 fw_residual_flag = 0;
1126     unsigned long flags = 0;
1127     u16 chk_scope = 0;
1128 
1129     if (!io_req)
1130         return;
1131     if (!cqe)
1132         return;
1133 
1134     if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1135         test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1136         test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1137         QEDF_ERR(&qedf->dbg_ctx,
1138              "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1139              io_req->xid);
1140         return;
1141     }
1142 
1143     sc_cmd = io_req->sc_cmd;
1144     fcp_rsp = &cqe->cqe_info.rsp_info;
1145 
1146     if (!sc_cmd) {
1147         QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1148         return;
1149     }
1150 
1151     if (!qedf_priv(sc_cmd)->io_req) {
1152         QEDF_WARN(&(qedf->dbg_ctx),
1153               "io_req is NULL, returned in another context.\n");
1154         return;
1155     }
1156 
1157     if (!sc_cmd->device) {
1158         QEDF_ERR(&qedf->dbg_ctx,
1159              "Device for sc_cmd %p is NULL.\n", sc_cmd);
1160         return;
1161     }
1162 
1163     if (!scsi_cmd_to_rq(sc_cmd)->q) {
1164         QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1165            "is not valid, sc_cmd=%p.\n", sc_cmd);
1166         return;
1167     }
1168 
1169     fcport = io_req->fcport;
1170 
1171     /*
1172      * When flush is active, let the cmds be completed from the cleanup
1173      * context
1174      */
1175     if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1176         (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1177          sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1178         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1179               "Dropping good completion xid=0x%x as fcport is flushing",
1180               io_req->xid);
1181         return;
1182     }
1183 
1184     qedf_parse_fcp_rsp(io_req, fcp_rsp);
1185 
1186     qedf_unmap_sg_list(qedf, io_req);
1187 
1188     /* Check for FCP transport error */
1189     if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1190         QEDF_ERR(&(qedf->dbg_ctx),
1191             "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1192             "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1193             io_req->fcp_rsp_code);
1194         sc_cmd->result = DID_BUS_BUSY << 16;
1195         goto out;
1196     }
1197 
1198     fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1199         FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1200     if (fw_residual_flag) {
1201         QEDF_ERR(&qedf->dbg_ctx,
1202              "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1203              io_req->xid, fcp_rsp->rsp_flags.flags,
1204              io_req->fcp_resid,
1205              cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1206              sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1207 
1208         if (io_req->cdb_status == 0)
1209             sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1210         else
1211             sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1212 
1213         /*
1214          * Set resid to the whole buffer length so we won't try to reuse
1215          * any previously transferred data.
1216          */
1217         scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1218         goto out;
1219     }
1220 
1221     switch (io_req->fcp_status) {
1222     case FC_GOOD:
1223         if (io_req->cdb_status == 0) {
1224             /* Good I/O completion */
1225             sc_cmd->result = DID_OK << 16;
1226         } else {
1227             refcount = kref_read(&io_req->refcount);
1228             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1229                 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1230                 "lba=%02x%02x%02x%02x cdb_status=%d "
1231                 "fcp_resid=0x%x refcount=%d.\n",
1232                 qedf->lport->host->host_no, sc_cmd->device->id,
1233                 sc_cmd->device->lun, io_req->xid,
1234                 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1235                 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1236                 io_req->cdb_status, io_req->fcp_resid,
1237                 refcount);
1238             sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1239 
1240             if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1241                 io_req->cdb_status == SAM_STAT_BUSY) {
1242                 /*
1243                  * Check whether we need to set retry_delay at
1244                  * all based on retry_delay module parameter
1245                  * and the status qualifier.
1246                  */
1247 
1248                 /* Upper 2 bits hold the scope */
1249                 scope = (fcp_rsp->retry_delay_timer & 0xC000) >> 14;
1250                 /* Lower 14 bits hold the qualifier */
1251                 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1252 
1253                 if (qedf_retry_delay)
1254                     chk_scope = 1;
1255                 /* Record stats */
1256                 if (io_req->cdb_status ==
1257                     SAM_STAT_TASK_SET_FULL)
1258                     qedf->task_set_fulls++;
1259                 else
1260                     qedf->busy++;
1261             }
1262         }
1263         if (io_req->fcp_resid)
1264             scsi_set_resid(sc_cmd, io_req->fcp_resid);
1265 
1266         if (chk_scope == 1) {
1267             if ((scope == 1 || scope == 2) &&
1268                 (qualifier > 0 && qualifier <= 0x3FEF)) {
1269                 /* Check we don't go over the max */
1270                 if (qualifier > QEDF_RETRY_DELAY_MAX) {
1271                     qualifier = QEDF_RETRY_DELAY_MAX;
1272                     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1273                           "qualifier = %d\n",
1274                           (fcp_rsp->retry_delay_timer &
1275                           0x3FFF));
1276                 }
1277                 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1278                       "Scope = %d and qualifier = %d",
1279                       scope, qualifier);
1280                 /*  Take fcport->rport_lock to
1281                  *  update the retry_delay_timestamp
1282                  */
1283                 spin_lock_irqsave(&fcport->rport_lock, flags);
1284                 fcport->retry_delay_timestamp =
1285                     jiffies + (qualifier * HZ / 10);
1286                 spin_unlock_irqrestore(&fcport->rport_lock,
1287                                flags);
1288 
1289             } else {
1290                 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1291                       "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1292                       scope, qualifier);
1293             }
1294         }
1295         break;
1296     default:
1297         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1298                io_req->fcp_status);
1299         break;
1300     }
1301 
1302 out:
1303     if (qedf_io_tracing)
1304         qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1305 
1306     /*
1307      * We wait till the end of the function to clear the
1308      * outstanding bit in case we need to send an abort
1309      */
1310     clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1311 
1312     io_req->sc_cmd = NULL;
1313     qedf_priv(sc_cmd)->io_req = NULL;
1314     scsi_done(sc_cmd);
1315     kref_put(&io_req->refcount, qedf_release_cmd);
1316 }
1317 
1318 /* Return a SCSI command in some other context besides a normal completion */
1319 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1320     int result)
1321 {
1322     struct scsi_cmnd *sc_cmd;
1323     int refcount;
1324 
1325     if (!io_req) {
1326         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1327         return;
1328     }
1329 
1330     if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1331         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1332               "io_req:%p scsi_done handling already done\n",
1333               io_req);
1334         return;
1335     }
1336 
1337     /*
1338      * We will be done with this command after this call so clear the
1339      * outstanding bit.
1340      */
1341     clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1342 
1343     sc_cmd = io_req->sc_cmd;
1344 
1345     if (!sc_cmd) {
1346         QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1347         return;
1348     }
1349 
1350     if (!virt_addr_valid(sc_cmd)) {
1351         QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1352         goto bad_scsi_ptr;
1353     }
1354 
1355     if (!qedf_priv(sc_cmd)->io_req) {
1356         QEDF_WARN(&(qedf->dbg_ctx),
1357               "io_req is NULL, returned in another context.\n");
1358         return;
1359     }
1360 
1361     if (!sc_cmd->device) {
1362         QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1363              sc_cmd);
1364         goto bad_scsi_ptr;
1365     }
1366 
1367     if (!virt_addr_valid(sc_cmd->device)) {
1368         QEDF_ERR(&qedf->dbg_ctx,
1369              "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1370         goto bad_scsi_ptr;
1371     }
1372 
1373     if (!sc_cmd->sense_buffer) {
1374         QEDF_ERR(&qedf->dbg_ctx,
1375              "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1376              sc_cmd);
1377         goto bad_scsi_ptr;
1378     }
1379 
1380     if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1381         QEDF_ERR(&qedf->dbg_ctx,
1382              "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1383              sc_cmd);
1384         goto bad_scsi_ptr;
1385     }
1386 
1387     qedf_unmap_sg_list(qedf, io_req);
1388 
1389     sc_cmd->result = result << 16;
1390     refcount = kref_read(&io_req->refcount);
1391     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1392         "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1393         "allowed=%d retries=%d refcount=%d.\n",
1394         qedf->lport->host->host_no, sc_cmd->device->id,
1395         sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1396         sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1397         sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1398         refcount);
1399 
1400     /*
1401      * Set resid to the whole buffer length so we won't try to reuse any
1402      * previously read data.
1403      */
1404     scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1405 
1406     if (qedf_io_tracing)
1407         qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1408 
1409     io_req->sc_cmd = NULL;
1410     qedf_priv(sc_cmd)->io_req = NULL;
1411     scsi_done(sc_cmd);
1412     kref_put(&io_req->refcount, qedf_release_cmd);
1413     return;
1414 
1415 bad_scsi_ptr:
1416     /*
1417      * Clear the io_req->sc_cmd backpointer so we don't try to process
1418      * this again
1419      */
1420     io_req->sc_cmd = NULL;
1421     kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
1422 }
1423 
1424 /*
1425  * Handle warning type CQE completions. This is mainly used for REC timer
1426  * popping.
1427  */
1428 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1429     struct qedf_ioreq *io_req)
1430 {
1431     int rval, i;
1432     struct qedf_rport *fcport = io_req->fcport;
1433     u64 err_warn_bit_map;
1434     u8 err_warn = 0xff;
1435 
1436     if (!cqe) {
1437         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1438               "cqe is NULL for io_req %p xid=0x%x\n",
1439               io_req, io_req->xid);
1440         return;
1441     }
1442 
1443     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1444           "xid=0x%x\n", io_req->xid);
1445     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1446           "err_warn_bitmap=%08x:%08x\n",
1447           le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1448           le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1449     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1450           "rx_buff_off=%08x, rx_id=%04x\n",
1451           le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1452           le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1453           le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1454 
1455     /* Scan the 64-bit error/warning bitmap and record the lowest set bit */
1456     err_warn_bit_map = (u64)
1457         ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1458         (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1459     for (i = 0; i < 64; i++) {
1460         if (err_warn_bit_map & (u64)((u64)1 << i)) {
1461             err_warn = i;
1462             break;
1463         }
1464     }
1465 
1466     /* Check if REC TOV expired if this is a tape device */
1467     if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1468         if (err_warn ==
1469             FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1470             QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1471             if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1472                 io_req->rx_buf_off =
1473                     cqe->cqe_info.err_info.rx_buf_off;
1474                 io_req->tx_buf_off =
1475                     cqe->cqe_info.err_info.tx_buf_off;
1476                 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1477                 rval = qedf_send_rec(io_req);
1478                 /*
1479                  * We only want to abort the io_req if we
1480                  * can't queue the REC command as we want to
1481                  * keep the exchange open for recovery.
1482                  */
1483                 if (rval)
1484                     goto send_abort;
1485             }
1486             return;
1487         }
1488     }
1489 
1490 send_abort:
1491     init_completion(&io_req->abts_done);
1492     rval = qedf_initiate_abts(io_req, true);
1493     if (rval)
1494         QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1495 }
1496 
1497 /* Cleanup a command when we receive an error detection completion */
1498 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1499     struct qedf_ioreq *io_req)
1500 {
1501     int rval;
1502 
1503     if (io_req == NULL) {
1504         QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1505         return;
1506     }
1507 
1508     if (io_req->fcport == NULL) {
1509         QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1510         return;
1511     }
1512 
1513     if (!cqe) {
1514         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1515             "cqe is NULL for io_req %p\n", io_req);
1516         return;
1517     }
1518 
1519     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1520           "xid=0x%x\n", io_req->xid);
1521     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1522           "err_warn_bitmap=%08x:%08x\n",
1523           le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1524           le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1525     QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1526           "rx_buff_off=%08x, rx_id=%04x\n",
1527           le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1528           le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1529           le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1530 
1531     /* When flush is active, let the cmds be flushed out from the cleanup context */
1532     if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1533         (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1534          io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1535         QEDF_ERR(&qedf->dbg_ctx,
1536             "Dropping EQE for xid=0x%x as fcport is flushing\n",
1537             io_req->xid);
1538         return;
1539     }
1540 
1541     if (qedf->stop_io_on_error) {
1542         qedf_stop_all_io(qedf);
1543         return;
1544     }
1545 
1546     init_completion(&io_req->abts_done);
1547     rval = qedf_initiate_abts(io_req, true);
1548     if (rval)
1549         QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1550 }
1551 
1552 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1553     struct qedf_ioreq *els_req)
1554 {
1555     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1556         "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1557         kref_read(&els_req->refcount));
1558 
1559     /*
1560      * Need to distinguish this from a timeout when calling the
1561      * els_req->cb_func.
1562      */
1563     els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1564 
1565     clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1566 
1567     /* Cancel the timer */
1568     cancel_delayed_work_sync(&els_req->timeout_work);
1569 
1570     /* Call callback function to complete command */
1571     if (els_req->cb_func && els_req->cb_arg) {
1572         els_req->cb_func(els_req->cb_arg);
1573         els_req->cb_arg = NULL;
1574     }
1575 
1576     /* Release kref for original initiate_els */
1577     kref_put(&els_req->refcount, qedf_release_cmd);
1578 }
1579 
1580 /* A value of -1 for lun is a wild card that means flush all
1581  * active SCSI I/Os for the target.
1582  */
1583 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1584 {
1585     struct qedf_ioreq *io_req;
1586     struct qedf_ctx *qedf;
1587     struct qedf_cmd_mgr *cmd_mgr;
1588     int i, rc;
1589     unsigned long flags;
1590     int flush_cnt = 0;
1591     int wait_cnt = 100;
1592     int refcount = 0;
1593 
1594     if (!fcport) {
1595         QEDF_ERR(NULL, "fcport is NULL\n");
1596         return;
1597     }
1598 
1599     /* Check that fcport is still offloaded */
1600     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1601         QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1602         return;
1603     }
1604 
1605     qedf = fcport->qedf;
1606 
1607     if (!qedf) {
1608         QEDF_ERR(NULL, "qedf is NULL.\n");
1609         return;
1610     }
1611 
1612     /* Only wait for all commands to be queued in the Upload context */
1613     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1614         (lun == -1)) {
1615         while (atomic_read(&fcport->ios_to_queue)) {
1616             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1617                   "Waiting for %d I/Os to be queued\n",
1618                   atomic_read(&fcport->ios_to_queue));
1619             if (wait_cnt == 0) {
1620                 QEDF_ERR(NULL,
1621                      "%d I/O requests could not be queued\n",
1622                      atomic_read(&fcport->ios_to_queue));
1623             }
1624             msleep(20);
1625             wait_cnt--;
1626         }
1627     }
1628 
1629     cmd_mgr = qedf->cmd_mgr;
1630 
1631     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1632           "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1633           atomic_read(&fcport->num_active_ios), fcport,
1634           fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1635     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1636 
1637     mutex_lock(&qedf->flush_mutex);
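         /* Flag the reset type so completion handlers drop events during the flush */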
1638     if (lun == -1) {
1639         set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1640     } else {
1641         set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1642         fcport->lun_reset_lun = lun;
1643     }
1644 
1645     for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1646         io_req = &cmd_mgr->cmds[i];
1647 
1648         if (!io_req)
1649             continue;
1650         if (!io_req->fcport)
1651             continue;
1652 
1653         spin_lock_irqsave(&cmd_mgr->lock, flags);
1654 
1655         if (io_req->alloc) {
1656             if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1657                 if (io_req->cmd_type == QEDF_SCSI_CMD)
1658                     QEDF_ERR(&qedf->dbg_ctx,
1659                          "Allocated but not queued, xid=0x%x\n",
1660                          io_req->xid);
1661             }
1662             spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1663         } else {
1664             spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1665             continue;
1666         }
1667 
1668         if (io_req->fcport != fcport)
1669             continue;
1670 
1671         /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1672          * but RRQ is still pending.
1673          * Workaround: Within qedf_send_rrq, we check if the fcport is
1674          * NULL, and we drop the ref on the io_req to clean it up.
1675          */
1676         if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677             refcount = kref_read(&io_req->refcount);
1678             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1679                   "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1680                   io_req->xid, io_req->cmd_type, refcount);
1681             /* If RRQ work has been queued, try to cancel it and
1682              * free the io_req
1683              */
1684             if (atomic_read(&io_req->state) ==
1685                 QEDFC_CMD_ST_RRQ_WAIT) {
1686                 if (cancel_delayed_work_sync
1687                     (&io_req->rrq_work)) {
1688                     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1689                           "Putting reference for pending RRQ work xid=0x%x.\n",
1690                           io_req->xid);
1691                     /* ID: 003 */
1692                     kref_put(&io_req->refcount,
1693                          qedf_release_cmd);
1694                 }
1695             }
1696             continue;
1697         }
1698 
1699         /* Only consider flushing ELS during target reset */
1700         if (io_req->cmd_type == QEDF_ELS &&
1701             lun == -1) {
1702             rc = kref_get_unless_zero(&io_req->refcount);
1703             if (!rc) {
1704                 QEDF_ERR(&(qedf->dbg_ctx),
1705                     "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1706                     io_req, io_req->xid);
1707                 continue;
1708             }
1709             qedf_initiate_cleanup(io_req, false);
1710             flush_cnt++;
1711             qedf_flush_els_req(qedf, io_req);
1712 
1713             /*
1714              * Release the kref and go back to the top of the
1715              * loop.
1716              */
1717             goto free_cmd;
1718         }
1719 
1720         if (io_req->cmd_type == QEDF_ABTS) {
1721             /* ID: 004 */
1722             rc = kref_get_unless_zero(&io_req->refcount);
1723             if (!rc) {
1724                 QEDF_ERR(&(qedf->dbg_ctx),
1725                     "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1726                     io_req, io_req->xid);
1727                 continue;
1728             }
1729             if (lun != -1 && io_req->lun != lun)
1730                 goto free_cmd;
1731 
1732             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1733                 "Flushing abort xid=0x%x.\n", io_req->xid);
1734 
1735             if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1736                 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1737                       "Putting ref for cancelled RRQ work xid=0x%x.\n",
1738                       io_req->xid);
1739                 kref_put(&io_req->refcount, qedf_release_cmd);
1740             }
1741 
1742             if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1743                 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1744                       "Putting ref for cancelled tmo work xid=0x%x.\n",
1745                       io_req->xid);
1746                 qedf_initiate_cleanup(io_req, true);
1747                 /* Notify eh_abort handler that ABTS is
1748                  * complete
1749                  */
1750                 complete(&io_req->abts_done);
1751                 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1752                 /* ID: 002 */
1753                 kref_put(&io_req->refcount, qedf_release_cmd);
1754             }
1755             flush_cnt++;
1756             goto free_cmd;
1757         }
1758 
1759         if (!io_req->sc_cmd)
1760             continue;
1761         if (!io_req->sc_cmd->device) {
1762             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1763                   "Device backpointer NULL for sc_cmd=%p.\n",
1764                   io_req->sc_cmd);
1765             /* Put reference for non-existent scsi_cmnd */
1766             io_req->sc_cmd = NULL;
1767             qedf_initiate_cleanup(io_req, false);
1768             kref_put(&io_req->refcount, qedf_release_cmd);
1769             continue;
1770         }
1771         if (lun > -1) {
1772             if (io_req->lun != lun)
1773                 continue;
1774         }
1775 
1776         /*
1777          * Use kref_get_unless_zero in the unlikely case the command
1778          * we're about to flush was completed in the normal SCSI path
1779          */
1780         rc = kref_get_unless_zero(&io_req->refcount);
1781         if (!rc) {
1782             QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1783                 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1784             continue;
1785         }
1786 
1787         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1788             "Cleanup xid=0x%x.\n", io_req->xid);
1789         flush_cnt++;
1790 
1791         /* Cleanup task and return I/O mid-layer */
1792         qedf_initiate_cleanup(io_req, true);
1793 
1794 free_cmd:
1795         kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 004 */
1796     }
1797 
1798     wait_cnt = 60;
1799     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1800           "Flushed 0x%x I/Os, active=0x%x.\n",
1801           flush_cnt, atomic_read(&fcport->num_active_ios));
1802     /* Only wait for all commands to complete in the Upload context */
1803     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1804         (lun == -1)) {
1805         while (atomic_read(&fcport->num_active_ios)) {
1806             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1807                   "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1808                   flush_cnt,
1809                   atomic_read(&fcport->num_active_ios),
1810                   wait_cnt);
1811             if (wait_cnt == 0) {
1812                 QEDF_ERR(&qedf->dbg_ctx,
1813                      "Flushed %d I/Os, active=%d.\n",
1814                      flush_cnt,
1815                      atomic_read(&fcport->num_active_ios));
1816                 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1817                     io_req = &cmd_mgr->cmds[i];
1818                     if (io_req->fcport &&
1819                         io_req->fcport == fcport) {
1820                         refcount =
1821                         kref_read(&io_req->refcount);
1822                         set_bit(QEDF_CMD_DIRTY,
1823                             &io_req->flags);
1824                         QEDF_ERR(&qedf->dbg_ctx,
1825                              "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1826                              io_req, io_req->xid,
1827                              io_req->flags,
1828                              io_req->sc_cmd,
1829                              refcount,
1830                              io_req->cmd_type);
1831                     }
1832                 }
1833                 WARN_ON(1);
1834                 break;
1835             }
1836             msleep(500);
1837             wait_cnt--;
1838         }
1839     }
1840 
1841     clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1842     clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1843     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1844     mutex_unlock(&qedf->flush_mutex);
1845 }
1846 
1847 /*
1848  * Initiate an ABTS middle path command. Note that we don't have to initialize
1849  * the task context for an ABTS task.
1850  */
1851 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1852 {
1853     struct fc_lport *lport;
1854     struct qedf_rport *fcport = io_req->fcport;
1855     struct fc_rport_priv *rdata;
1856     struct qedf_ctx *qedf;
1857     u16 xid;
1858     int rc = 0;
1859     unsigned long flags;
1860     struct fcoe_wqe *sqe;
1861     u16 sqe_idx;
1862     int refcount = 0;
1863 
1864     /* Sanity check qedf_rport before dereferencing any pointers */
1865     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1866         QEDF_ERR(NULL, "tgt not offloaded\n");
1867         rc = 1;
1868         goto out;
1869     }
1870 
1871     qedf = fcport->qedf;
1872     rdata = fcport->rdata;
1873 
1874     if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1875         QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1876         rc = 1;
1877         goto out;
1878     }
1879 
1880     lport = qedf->lport;
1881 
1882     if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1883         QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1884         rc = 1;
1885         goto drop_rdata_kref;
1886     }
1887 
1888     if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1889         QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1890         rc = 1;
1891         goto drop_rdata_kref;
1892     }
1893 
1894     /* Ensure room on SQ */
1895     if (!atomic_read(&fcport->free_sqes)) {
1896         QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1897         rc = 1;
1898         goto drop_rdata_kref;
1899     }
1900 
1901     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1902         QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1903         rc = 1;
1904         goto drop_rdata_kref;
1905     }
1906 
1907     if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1908         test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1909         test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1910         QEDF_ERR(&qedf->dbg_ctx,
1911              "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1912              io_req->xid, io_req->sc_cmd);
1913         rc = 1;
1914         goto drop_rdata_kref;
1915     }
1916 
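         /* Reference for the outstanding ABTS; dropped on completion or timeout */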
1917     kref_get(&io_req->refcount);
1918 
1919     xid = io_req->xid;
1920     qedf->control_requests++;
1921     qedf->packet_aborts++;
1922 
1923     /* Set the command type to abort */
1924     io_req->cmd_type = QEDF_ABTS;
1925     io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1926 
1927     set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1928     refcount = kref_read(&io_req->refcount);
1929     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1930           "ABTS io_req xid = 0x%x refcount=%d\n",
1931           xid, refcount);
1932 
1933     qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1934 
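         /* Build and post the ABTS WQE on the SQ under the rport lock */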
1935     spin_lock_irqsave(&fcport->rport_lock, flags);
1936 
1937     sqe_idx = qedf_get_sqe_idx(fcport);
1938     sqe = &fcport->sq[sqe_idx];
1939     memset(sqe, 0, sizeof(struct fcoe_wqe));
1940     io_req->task_params->sqe = sqe;
1941 
1942     init_initiator_abort_fcoe_task(io_req->task_params);
1943     qedf_ring_doorbell(fcport);
1944 
1945     spin_unlock_irqrestore(&fcport->rport_lock, flags);
1946 
1947 drop_rdata_kref:
1948     kref_put(&rdata->kref, fc_rport_destroy);
1949 out:
1950     return rc;
1951 }
1952 
1953 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1954     struct qedf_ioreq *io_req)
1955 {
1956     uint32_t r_ctl;
1957     int rc;
1958     struct qedf_rport *fcport = io_req->fcport;
1959 
1960     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1961            "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1962 
1963     r_ctl = cqe->cqe_info.abts_info.r_ctl;
1964 
1965     /* This was added at a point when we were scheduling abts_compl &
1966      * cleanup_compl on different CPUs and there was a possibility of
1967      * the io_req being freed from the other context before we got here.
1968      */
1969     if (!fcport) {
1970         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1971               "Dropping ABTS completion xid=0x%x as fcport is NULL\n",
1972               io_req->xid);
1973         return;
1974     }
1975 
1976     /*
1977      * When flush is active, let the cmds be completed from the cleanup
1978      * context
1979      */
1980     if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1981         test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1982         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1983               "Dropping ABTS completion xid=0x%x as fcport is flushing\n",
1984               io_req->xid);
1985         return;
1986     }
1987 
1988     if (!cancel_delayed_work(&io_req->timeout_work)) {
1989         QEDF_ERR(&qedf->dbg_ctx,
1990              "Wasn't able to cancel abts timeout work.\n");
1991     }
1992 
1993     switch (r_ctl) {
1994     case FC_RCTL_BA_ACC:
1995         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1996             "ABTS response - ACC Send RRQ after R_A_TOV\n");
1997         io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1998         rc = kref_get_unless_zero(&io_req->refcount);   /* ID: 003 */
1999         if (!rc) {
2000             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2001                   "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2002                   io_req->xid);
2003             return;
2004         }
2005         /*
2006          * Don't release this cmd yet. It will be released
2007          * after we get the RRQ response
2008          */
2009         queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2010             msecs_to_jiffies(qedf->lport->r_a_tov));
2011         atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2012         break;
2013     /* For error cases let the cleanup return the command */
2014     case FC_RCTL_BA_RJT:
2015         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2016            "ABTS response - RJT\n");
2017         io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2018         break;
2019     default:
2020         QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2021         break;
2022     }
2023 
2024     clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2025 
2026     if (io_req->sc_cmd) {
2027         if (!io_req->return_scsi_cmd_on_abts)
2028             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2029                   "Not calling scsi_done for xid=0x%x.\n",
2030                   io_req->xid);
2031         if (io_req->return_scsi_cmd_on_abts)
2032             qedf_scsi_done(qedf, io_req, DID_ERROR);
2033     }
2034 
2035     /* Notify eh_abort handler that ABTS is complete */
2036     complete(&io_req->abts_done);
2037 
2038     kref_put(&io_req->refcount, qedf_release_cmd);
2039 }
2040 
2041 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2042 {
2043     struct qedf_mp_req *mp_req;
2044     struct scsi_sge *mp_req_bd;
2045     struct scsi_sge *mp_resp_bd;
2046     struct qedf_ctx *qedf = io_req->fcport->qedf;
2047     dma_addr_t addr;
2048     uint64_t sz;
2049 
2050     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2051 
2052     mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2053     memset(mp_req, 0, sizeof(struct qedf_mp_req));
2054 
2055     if (io_req->cmd_type != QEDF_ELS) {
2056         mp_req->req_len = sizeof(struct fcp_cmnd);
2057         io_req->data_xfer_len = mp_req->req_len;
2058     } else
2059         mp_req->req_len = io_req->data_xfer_len;
2060 
2061     mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2062         &mp_req->req_buf_dma, GFP_KERNEL);
2063     if (!mp_req->req_buf) {
2064         QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2065         qedf_free_mp_resc(io_req);
2066         return -ENOMEM;
2067     }
2068 
2069     mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2070         QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2071     if (!mp_req->resp_buf) {
2072         QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2073               "buffer\n");
2074         qedf_free_mp_resc(io_req);
2075         return -ENOMEM;
2076     }
2077 
2078     /* Allocate and map mp_req_bd and mp_resp_bd */
2079     sz = sizeof(struct scsi_sge);
2080     mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2081         &mp_req->mp_req_bd_dma, GFP_KERNEL);
2082     if (!mp_req->mp_req_bd) {
2083         QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2084         qedf_free_mp_resc(io_req);
2085         return -ENOMEM;
2086     }
2087 
2088     mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2089         &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2090     if (!mp_req->mp_resp_bd) {
2091         QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2092         qedf_free_mp_resc(io_req);
2093         return -ENOMEM;
2094     }
2095 
2096     /* Fill bd table */
2097     addr = mp_req->req_buf_dma;
2098     mp_req_bd = mp_req->mp_req_bd;
2099     mp_req_bd->sge_addr.lo = U64_LO(addr);
2100     mp_req_bd->sge_addr.hi = U64_HI(addr);
2101     mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2102 
2103     /*
2104      * MP buffer is either a task mgmt command or an ELS.
2105      * So the assumption is that it consumes a single bd
2106      * entry in the bd table
2107      */
2108     mp_resp_bd = mp_req->mp_resp_bd;
2109     addr = mp_req->resp_buf_dma;
2110     mp_resp_bd->sge_addr.lo = U64_LO(addr);
2111     mp_resp_bd->sge_addr.hi = U64_HI(addr);
2112     mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
2113 
2114     return 0;
2115 }
2116 
2117 /*
2118  * Last ditch effort to clear the port if it's stuck. Used only after a
2119  * cleanup task times out.
2120  */
2121 static void qedf_drain_request(struct qedf_ctx *qedf)
2122 {
2123     if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2124         QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2125         return;
2126     }
2127 
2128     /* Set bit to return all queuecommand requests as busy */
2129     set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2130 
2131     /* Call qed drain request for function. Should be synchronous */
2132     qed_ops->common->drain(qedf->cdev);
2133 
2134     /* Settle time for CQEs to be returned */
2135     msleep(100);
2136 
2137     /* Unplug and continue */
2138     clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2139 }
2140 
2141 /*
2142  * Returns SUCCESS if the cleanup task does not time out, otherwise returns
2143  * FAILED.
2144  */
2145 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2146     bool return_scsi_cmd_on_abts)
2147 {
2148     struct qedf_rport *fcport;
2149     struct qedf_ctx *qedf;
2150     int tmo = 0;
2151     int rc = SUCCESS;
2152     unsigned long flags;
2153     struct fcoe_wqe *sqe;
2154     u16 sqe_idx;
2155     int refcount = 0;
2156 
2157     fcport = io_req->fcport;
2158     if (!fcport) {
2159         QEDF_ERR(NULL, "fcport is NULL.\n");
2160         return SUCCESS;
2161     }
2162 
2163     /* Sanity check qedf_rport before dereferencing any pointers */
2164     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2165         QEDF_ERR(NULL, "tgt not offloaded\n");
2166         return SUCCESS;
2167     }
2168 
2169     qedf = fcport->qedf;
2170     if (!qedf) {
2171         QEDF_ERR(NULL, "qedf is NULL.\n");
2172         return SUCCESS;
2173     }
2174 
2175     if (io_req->cmd_type == QEDF_ELS) {
2176         goto process_els;
2177     }
2178 
2179     if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2180         test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2181         QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2182               "cleanup processing or already completed.\n",
2183               io_req->xid);
2184         return SUCCESS;
2185     }
2186     set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2187 
2188 process_els:
2189     /* Ensure room on SQ */
2190     if (!atomic_read(&fcport->free_sqes)) {
2191         QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2192         /* Need to make sure we clear the flag since it was set */
2193         clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2194         return FAILED;
2195     }
2196 
2197     if (io_req->cmd_type == QEDF_CLEANUP) {
2198         QEDF_ERR(&qedf->dbg_ctx,
2199              "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2200              io_req->xid, io_req->cmd_type);
2201         clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2202         return SUCCESS;
2203     }
2204 
2205     refcount = kref_read(&io_req->refcount);
2206 
2207     QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2208           "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2209           io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2210           refcount, fcport, fcport->rdata->ids.port_id);
2211 
2212     /* Cleanup cmds re-use the same TID as the original I/O */
2213     io_req->cmd_type = QEDF_CLEANUP;
2214     io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2215 
2216     init_completion(&io_req->cleanup_done);
2217 
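         /* Post the cleanup request; its completion signals cleanup_done */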
2218     spin_lock_irqsave(&fcport->rport_lock, flags);
2219 
2220     sqe_idx = qedf_get_sqe_idx(fcport);
2221     sqe = &fcport->sq[sqe_idx];
2222     memset(sqe, 0, sizeof(struct fcoe_wqe));
2223     io_req->task_params->sqe = sqe;
2224 
2225     init_initiator_cleanup_fcoe_task(io_req->task_params);
2226     qedf_ring_doorbell(fcport);
2227 
2228     spin_unlock_irqrestore(&fcport->rport_lock, flags);
2229 
2230     tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2231                       QEDF_CLEANUP_TIMEOUT * HZ);
2232 
2233     if (!tmo) {
2234         rc = FAILED;
2235         /* Timeout case */
2236         QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2237               "xid=%x.\n", io_req->xid);
2238         clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2239         /* Issue a drain request if cleanup task times out */
2240         QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2241         qedf_drain_request(qedf);
2242     }
2243 
2244     /* If it is a TASK MGMT command, handle it; the reference will be
2245      * decreased in qedf_execute_tmf
2246      */
2247     if (io_req->tm_flags  == FCP_TMF_LUN_RESET ||
2248         io_req->tm_flags == FCP_TMF_TGT_RESET) {
2249         clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2250         io_req->sc_cmd = NULL;
2251         kref_put(&io_req->refcount, qedf_release_cmd);
2252         complete(&io_req->tm_done);
2253     }
2254 
2255     if (io_req->sc_cmd) {
2256         if (!io_req->return_scsi_cmd_on_abts)
2257             QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2258                   "Not calling scsi_done for xid=0x%x.\n",
2259                   io_req->xid);
2260         if (io_req->return_scsi_cmd_on_abts)
2261             qedf_scsi_done(qedf, io_req, DID_ERROR);
2262     }
2263 
2264     if (rc == SUCCESS)
2265         io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2266     else
2267         io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2268 
2269     return rc;
2270 }
2271 
2272 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2273     struct qedf_ioreq *io_req)
2274 {
2275     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2276            io_req->xid);
2277 
2278     clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2279 
2280     /* Complete so we can finish cleaning up the I/O */
2281     complete(&io_req->cleanup_done);
2282 }
2283 
2284 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2285     uint8_t tm_flags)
2286 {
2287     struct qedf_ioreq *io_req;
2288     struct fcoe_task_context *task;
2289     struct qedf_ctx *qedf = fcport->qedf;
2290     struct fc_lport *lport = qedf->lport;
2291     int rc = 0;
2292     uint16_t xid;
2293     int tmo = 0;
2294     int lun = 0;
2295     unsigned long flags;
2296     struct fcoe_wqe *sqe;
2297     u16 sqe_idx;
2298 
2299     if (!sc_cmd) {
2300         QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2301         return FAILED;
2302     }
2303 
2304     lun = (int)sc_cmd->device->lun;
2305     if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2306         QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2307         rc = FAILED;
2308         goto no_flush;
2309     }
2310 
2311     io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2312     if (!io_req) {
2313         QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate TMF io_req.\n");
2314         rc = -EAGAIN;
2315         goto no_flush;
2316     }
2317 
2318     if (tm_flags == FCP_TMF_LUN_RESET)
2319         qedf->lun_resets++;
2320     else if (tm_flags == FCP_TMF_TGT_RESET)
2321         qedf->target_resets++;
2322 
2323     /* Initialize rest of io_req fields */
2324     io_req->sc_cmd = sc_cmd;
2325     io_req->fcport = fcport;
2326     io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2327 
2328     /* Record which cpu this request is associated with */
2329     io_req->cpu = smp_processor_id();
2330 
2331     /* Set TM flags */
2332     io_req->io_req_flags = QEDF_READ;
2333     io_req->data_xfer_len = 0;
2334     io_req->tm_flags = tm_flags;
2335 
2336     /* Don't return the SCSI command to the midlayer on abort */
2337     io_req->return_scsi_cmd_on_abts = false;
2338 
2339     /* Obtain exchange id */
2340     xid = io_req->xid;
2341 
2342     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2343            "0x%x\n", xid);
2344 
2345     /* Initialize task context for this IO request */
2346     task = qedf_get_task_mem(&qedf->tasks, xid);
2347 
2348     init_completion(&io_req->tm_done);
2349 
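         /* Post the TMF on the SQ; qedf_process_tmf_compl() signals tm_done */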
2350     spin_lock_irqsave(&fcport->rport_lock, flags);
2351 
2352     sqe_idx = qedf_get_sqe_idx(fcport);
2353     sqe = &fcport->sq[sqe_idx];
2354     memset(sqe, 0, sizeof(struct fcoe_wqe));
2355 
2356     qedf_init_task(fcport, lport, io_req, task, sqe);
2357     qedf_ring_doorbell(fcport);
2358 
2359     spin_unlock_irqrestore(&fcport->rport_lock, flags);
2360 
2361     set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2362     tmo = wait_for_completion_timeout(&io_req->tm_done,
2363         QEDF_TM_TIMEOUT * HZ);
2364 
2365     if (!tmo) {
2366         rc = FAILED;
2367         QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2368         /* Clear outstanding bit since command timed out */
2369         clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2370         io_req->sc_cmd = NULL;
2371     } else {
2372         /* Check TMF response code */
2373         if (io_req->fcp_rsp_code == 0)
2374             rc = SUCCESS;
2375         else
2376             rc = FAILED;
2377     }
2378     /*
2379      * Double check that fcport has not gone into an uploading state before
2380      * executing the command flush for the LUN/target.
2381      */
2382     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2383         QEDF_ERR(&qedf->dbg_ctx,
2384              "fcport is uploading, not executing flush.\n");
2385         goto no_flush;
2386     }
2387     /* We do not need this io_req any more */
2388     kref_put(&io_req->refcount, qedf_release_cmd);
2389 
2390 
2391     if (tm_flags == FCP_TMF_LUN_RESET)
2392         qedf_flush_active_ios(fcport, lun);
2393     else
2394         qedf_flush_active_ios(fcport, -1);
2395 
2396 no_flush:
2397     if (rc != SUCCESS) {
2398         QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2399         rc = FAILED;
2400     } else {
2401         QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2402         rc = SUCCESS;
2403     }
2404     return rc;
2405 }
2406 
2407 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2408 {
2409     struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2410     struct fc_rport_libfc_priv *rp = rport->dd_data;
2411     struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2412     struct qedf_ctx *qedf;
2413     struct fc_lport *lport = shost_priv(sc_cmd->device->host);
2414     int rc = SUCCESS;
2415     int rval;
2416     struct qedf_ioreq *io_req = NULL;
2417     int ref_cnt = 0;
2418     struct fc_rport_priv *rdata = fcport->rdata;
2419 
2420     QEDF_ERR(NULL,
2421          "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2422          tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2423          rport->scsi_target_id, (int)sc_cmd->device->lun);
2424 
2425     if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2426         QEDF_ERR(NULL, "stale rport\n");
2427         return FAILED;
2428     }
2429 
2430     QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
2431          (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2432          "LUN RESET");
2433 
2434     if (qedf_priv(sc_cmd)->io_req) {
2435         io_req = qedf_priv(sc_cmd)->io_req;
2436         ref_cnt = kref_read(&io_req->refcount);
2437         QEDF_ERR(NULL,
2438              "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2439              io_req, io_req->xid, ref_cnt);
2440     }
2441 
2442     rval = fc_remote_port_chkready(rport);
2443     if (rval) {
2444         QEDF_ERR(NULL, "device_reset rport not ready\n");
2445         rc = FAILED;
2446         goto tmf_err;
2447     }
2448 
2449     rc = fc_block_scsi_eh(sc_cmd);
2450     if (rc)
2451         goto tmf_err;
2452 
2453     if (!fcport) {
2454         QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2455         rc = FAILED;
2456         goto tmf_err;
2457     }
2458 
2459     qedf = fcport->qedf;
2460 
2461     if (!qedf) {
2462         QEDF_ERR(NULL, "qedf is NULL.\n");
2463         rc = FAILED;
2464         goto tmf_err;
2465     }
2466 
2467     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2468         QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2469         rc = SUCCESS;
2470         goto tmf_err;
2471     }
2472 
2473     if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2474         test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2475         rc = SUCCESS;
2476         goto tmf_err;
2477     }
2478 
2479     if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2480         QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2481         rc = FAILED;
2482         goto tmf_err;
2483     }
2484 
2485     if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2486         if (!fcport->rdata)
2487             QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2488                  fcport);
2489         else
2490             QEDF_ERR(&qedf->dbg_ctx,
2491                  "fcport %p port_id=%06x is uploading.\n",
2492                  fcport, fcport->rdata->ids.port_id);
2493         rc = FAILED;
2494         goto tmf_err;
2495     }
2496 
2497     rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2498 
2499 tmf_err:
2500     kref_put(&rdata->kref, fc_rport_destroy);
2501     return rc;
2502 }
2503 
2504 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2505     struct qedf_ioreq *io_req)
2506 {
2507     struct fcoe_cqe_rsp_info *fcp_rsp;
2508 
2509     clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2510 
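         /* Parse the FCP response so qedf_execute_tmf() can check fcp_rsp_code */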
2511     fcp_rsp = &cqe->cqe_info.rsp_info;
2512     qedf_parse_fcp_rsp(io_req, fcp_rsp);
2513 
2514     io_req->sc_cmd = NULL;
2515     complete(&io_req->tm_done);
2516 }
2517 
2518 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2519     struct fcoe_cqe *cqe)
2520 {
2521     unsigned long flags;
2522     uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2523     u32 payload_len, crc;
2524     struct fc_frame_header *fh;
2525     struct fc_frame *fp;
2526     struct qedf_io_work *io_work;
2527     u32 bdq_idx;
2528     void *bdq_addr;
2529     struct scsi_bd *p_bd_info;
2530 
2531     p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2532     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2533           "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2534           le32_to_cpu(p_bd_info->address.hi),
2535           le32_to_cpu(p_bd_info->address.lo),
2536           le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2537           le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2538           qedf->bdq_prod_idx, pktlen);
2539 
2540     bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2541     if (bdq_idx >= QEDF_BDQ_SIZE) {
2542         QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2543             bdq_idx);
2544         goto increment_prod;
2545     }
2546 
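         /* Look up the BDQ buffer that holds the received frame data */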
2547     bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2548     if (!bdq_addr) {
2549         QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2550             "unsolicited packet.\n");
2551         goto increment_prod;
2552     }
2553 
2554     if (qedf_dump_frames) {
2555         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2556             "BDQ frame is at addr=%p.\n", bdq_addr);
2557         print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2558             (void *)bdq_addr, pktlen, false);
2559     }
2560 
2561     /* Allocate frame */
2562     payload_len = pktlen - sizeof(struct fc_frame_header);
2563     fp = fc_frame_alloc(qedf->lport, payload_len);
2564     if (!fp) {
2565         QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2566         goto increment_prod;
2567     }
2568 
2569     /* Copy data from BDQ buffer into fc_frame struct */
2570     fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2571     memcpy(fh, (void *)bdq_addr, pktlen);
2572 
2573     QEDF_WARN(&qedf->dbg_ctx,
2574           "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2575           ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2576           fh->fh_type, fc_frame_payload_op(fp));
2577 
2578     /* Initialize the frame so libfc sees it as a valid frame */
2579     crc = fcoe_fc_crc(fp);
2580     fc_frame_init(fp);
2581     fr_dev(fp) = qedf->lport;
2582     fr_sof(fp) = FC_SOF_I3;
2583     fr_eof(fp) = FC_EOF_T;
2584     fr_crc(fp) = cpu_to_le32(~crc);
2585 
2586     /*
2587      * We need to return the frame back up to libfc in a non-atomic
2588      * context
2589      */
2590     io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2591     if (!io_work) {
2592         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2593                "work for I/O completion.\n");
2594         fc_frame_free(fp);
2595         goto increment_prod;
2596     }
2597     memset(io_work, 0, sizeof(struct qedf_io_work));
2598 
2599     INIT_WORK(&io_work->work, qedf_fp_io_handler);
2600 
2601     /* Copy contents of CQE for deferred processing */
2602     memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2603 
2604     io_work->qedf = qedf;
2605     io_work->fp = fp;
2606 
2607     queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2608 increment_prod:
2609     spin_lock_irqsave(&qedf->hba_lock, flags);
2610 
2611     /* Increment producer to let f/w know we've handled the frame */
2612     qedf->bdq_prod_idx++;
2613 
2614     /* Producer index wraps at uint16_t boundary */
2615     if (qedf->bdq_prod_idx == 0xffff)
2616         qedf->bdq_prod_idx = 0;
2617 
2618     writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2619     readw(qedf->bdq_primary_prod);
2620     writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2621     readw(qedf->bdq_secondary_prod);
2622 
2623     spin_unlock_irqrestore(&qedf->hba_lock, flags);
2624 }