/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
                      struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);

/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request
 * @sld: Level information
 *
 * Should be called with lock held.
 *
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

    switch (sld->level) {
    case CSIO_LEV_LUN:
        if (scmnd == NULL)
            return false;

        return ((ioreq->lnode == sld->lnode) &&
            (ioreq->rnode == sld->rnode) &&
            ((uint64_t)scmnd->device->lun == sld->oslun));

    case CSIO_LEV_RNODE:
        return ((ioreq->lnode == sld->lnode) &&
                (ioreq->rnode == sld->rnode));
    case CSIO_LEV_LNODE:
        return (ioreq->lnode == sld->lnode);
    case CSIO_LEV_ALL:
        return true;
    default:
        return false;
    }
}

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Should be called with lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
                struct csio_scsi_level_data *sld,
                struct list_head *dest)
{
    struct list_head *tmp, *next;

    if (list_empty(&scm->active_q))
        return;

    /* Just splice the entire active_q into dest */
    if (sld->level == CSIO_LEV_ALL) {
        list_splice_tail_init(&scm->active_q, dest);
        return;
    }

    list_for_each_safe(tmp, next, &scm->active_q) {
        if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
            list_del_init(tmp);
            list_add_tail(tmp, dest);
        }
    }
}

static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
    switch (error) {
    case FW_ERR_LINK_DOWN:
    case FW_RDEV_NOT_READY:
    case FW_ERR_RDEV_LOST:
    case FW_ERR_RDEV_LOGO:
    case FW_ERR_RDEV_IMPL_LOGO:
        return true;
    }
    return false;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
    struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    /* Check for Task Management */
    if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
        int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
        fcp_cmnd->fc_tm_flags = 0;
        fcp_cmnd->fc_cmdref = 0;

        memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
        fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

        if (req->nsge) {
            if (req->datadir == DMA_TO_DEVICE)
                fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
            else
                fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
        } else {
            fcp_cmnd->fc_flags = 0;
        }
    } else {
        memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
        int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
        fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
    }
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry).
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
                      FW_SCSI_CMD_WR_IMMDLEN(imm));
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                        FW_WR_LEN16_V(
                        DIV_ROUND_UP(size, 16)));

    wr->cookie = (uintptr_t) req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t) req->tmo;
    wr->r3 = 0;
    memset(&wr->r5, 0, 8);

    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r6 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r4_lo[0] = 0;
    wr->u.fcoe.r4_lo[1] = 0;

    /* Frame a FCP command */
    csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
                    sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)                   \
    (sizeof(struct fw_scsi_cmd_wr) +        /* WR size */   \
     ALIGN((_imm), 16))             /* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)                    \
            (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with SCSI CMD WR.
 *
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
    struct csio_wr_pair wrp;
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);
    uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (unlikely(req->drv_status != 0))
        return;

    if (wrp.size1 >= size) {
        /* Initialize WR in one shot */
        csio_scsi_init_cmd_wr(req, wrp.addr1, size);
    } else {
        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

        /*
         * Make a temporary copy of the WR and write back
         * the copy into the WR pair.
         */
        csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
        memcpy(wrp.addr1, tmpwr, wrp.size1);
        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
    }
}

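/*
 * Editor's sketch (not part of the original driver): the "temporary copy,
 * then write back into the WR pair" pattern above recurs verbatim in
 * csio_scsi_read(), csio_scsi_write() and csio_scsi_abrt_cls() below.
 * A hypothetical helper capturing it could look like this; the name
 * csio_scsi_wr_copy() is illustrative, not an existing symbol.
 */
#if 0	/* illustrative only */
static void
csio_scsi_wr_copy(struct csio_wr_pair *wrp, uint8_t *tmpwr, uint32_t size)
{
    /* First fragment fills the space left at the end of the EQ ring. */
    memcpy(wrp->addr1, tmpwr, wrp->size1);
    /* The remainder wraps around to the start of the ring. */
    memcpy(wrp->addr2, tmpwr + wrp->size1, size - wrp->size1);
}
#endif
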
/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 *
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
               struct ulptx_sgl *sgl)
{
    struct ulptx_sge_pair *sge_pair = NULL;
    struct scatterlist *sgel;
    uint32_t i = 0;
    uint32_t xfer_len;
    struct list_head *tmp;
    struct csio_dma_buf *dma_buf;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
                     ULPTX_NSGE_V(req->nsge));
    /* Now add the data SGLs */
    if (likely(!req->dcopy)) {
        scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
            if (i == 0) {
                sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
                sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
                continue;
            }
            if ((i - 1) & 0x1) {
                sge_pair->addr[1] = cpu_to_be64(
                            sg_dma_address(sgel));
                sge_pair->len[1] = cpu_to_be32(
                            sg_dma_len(sgel));
                sge_pair++;
            } else {
                sge_pair->addr[0] = cpu_to_be64(
                            sg_dma_address(sgel));
                sge_pair->len[0] = cpu_to_be32(
                            sg_dma_len(sgel));
            }
        }
    } else {
        /* Program sg elements with driver's DDP buffer */
        xfer_len = scsi_bufflen(scmnd);
        list_for_each(tmp, &req->gen_list) {
            dma_buf = (struct csio_dma_buf *)tmp;
            if (i == 0) {
                sgl->addr0 = cpu_to_be64(dma_buf->paddr);
                sgl->len0 = cpu_to_be32(
                        min(xfer_len, dma_buf->len));
                sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
            } else if ((i - 1) & 0x1) {
                sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
                sge_pair->len[1] = cpu_to_be32(
                        min(xfer_len, dma_buf->len));
                sge_pair++;
            } else {
                sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
                sge_pair->len[0] = cpu_to_be32(
                        min(xfer_len, dma_buf->len));
            }
            xfer_len -= min(xfer_len, dma_buf->len);
            i++;
        }
    }
}

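/*
 * For reference, the DSGL built above follows the ulptx_sgl layout from
 * t4_msg.h: the first SGE lives inline in the header (len0/addr0), and
 * every subsequent pair of SGEs shares one ulptx_sge_pair:
 *
 *   ulptx_sgl:      cmd_nsge | len0 | addr0
 *   ulptx_sge_pair: len[0] | len[1] | addr[0] | addr[1]
 *
 * This is why the loops above special-case i == 0 and then alternate
 * between slots 1 and 0 of sge_pair via the ((i - 1) & 0x1) test.
 */
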
/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
    struct ulptx_sgl *sgl;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
                     FW_SCSI_READ_WR_IMMDLEN(imm));
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
    wr->cookie = (uintptr_t)req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t)(req->tmo);
    wr->use_xfer_cnt = 1;
    wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r4 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r3_lo[0] = 0;
    wr->u.fcoe.r3_lo[1] = 0;
    csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
                    sizeof(struct fw_scsi_read_wr)));

    /* Move WR pointer past command and immediate data */
    sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
                  sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

    /* Fill in the DSGL */
    csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
    struct ulptx_sgl *sgl;
    struct csio_dma_buf *dma_buf;
    uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
                     FW_SCSI_WRITE_WR_IMMDLEN(imm));
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
    wr->cookie = (uintptr_t)req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t)(req->tmo);
    wr->use_xfer_cnt = 1;
    wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
    /* Get RSP DMA buffer */
    dma_buf = &req->dma_buf;

    /* Prepare RSP SGL */
    wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
    wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

    wr->r4 = 0;

    wr->u.fcoe.ctl_pri = 0;
    wr->u.fcoe.cp_en_class = 0;
    wr->u.fcoe.r3_lo[0] = 0;
    wr->u.fcoe.r3_lo[1] = 0;
    csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
                    sizeof(struct fw_scsi_write_wr)));

    /* Move WR pointer past command and immediate data */
    sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
                  sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

    /* Fill in the DSGL */
    csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)                    \
do {                                           \
    (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */          \
           ALIGN((imm), 16) +           /* Immed data */       \
           sizeof(struct ulptx_sgl);        /* ulptx_sgl */        \
                                           \
    if (unlikely((req)->nsge > 1))                             \
        (sz) += (sizeof(struct ulptx_sge_pair) *               \
                (ALIGN(((req)->nsge - 1), 2) / 2));            \
                            /* Data SGE */         \
} while (0)

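/*
 * Worked example of the sizing macro (numbers illustrative): with 16
 * bytes of immediate FCP_CMND data (imm = 16) and nsge = 5,
 *
 *   sz = sizeof(struct fw_scsi_read_wr) + ALIGN(16, 16)
 *      + sizeof(struct ulptx_sgl)
 *      + sizeof(struct ulptx_sge_pair) * (ALIGN(5 - 1, 2) / 2)  (= 2 pairs)
 *
 * i.e. one SGE rides inline in the ulptx_sgl and every further two SGEs
 * cost one ulptx_sge_pair. Callers then ALIGN() the total up to 16 bytes.
 */
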
/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
    struct csio_wr_pair wrp;
    uint32_t size;
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
    size = ALIGN(size, 16);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (likely(req->drv_status == 0)) {
        if (likely(wrp.size1 >= size)) {
            /* Initialize WR in one shot */
            csio_scsi_init_read_wr(req, wrp.addr1, size);
        } else {
            uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
            /*
             * Make a temporary copy of the WR and write back
             * the copy into the WR pair.
             */
            csio_scsi_init_read_wr(req, (void *)tmpwr, size);
            memcpy(wrp.addr1, tmpwr, wrp.size1);
            memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
        }
    }
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI WRITE WR.
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
    struct csio_wr_pair wrp;
    uint32_t size;
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
    size = ALIGN(size, 16);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (likely(req->drv_status == 0)) {
        if (likely(wrp.size1 >= size)) {
            /* Initialize WR in one shot */
            csio_scsi_init_write_wr(req, wrp.addr1, size);
        } else {
            uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
            /*
             * Make a temporary copy of the WR and write back
             * the copy into the WR pair.
             */
            csio_scsi_init_write_wr(req, (void *)tmpwr, size);
            memcpy(wrp.addr1, tmpwr, wrp.size1);
            memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
        }
    }
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @req: IO req structure.
 *
 * Checks whether the SGLs/data buffers are virtually contiguous, as
 * required for DDP. If they are, the driver posts the SGLs in the WR;
 * otherwise it posts internal DDP buffers for the request.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
    struct csio_hw *hw = req->lnode->hwp;
#endif
    struct scatterlist *sgel = NULL;
    struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
    uint64_t sg_addr = 0;
    uint32_t ddp_pagesz = 4096;
    uint32_t buf_off;
    struct csio_dma_buf *dma_buf = NULL;
    uint32_t alloc_len = 0;
    uint32_t xfer_len = 0;
    uint32_t sg_len = 0;
    uint32_t i;

    scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
        sg_addr = sg_dma_address(sgel);
        sg_len  = sg_dma_len(sgel);

        buf_off = sg_addr & (ddp_pagesz - 1);

        /* Except for the 1st buffer, all buffer addrs must be page aligned */
        if (i != 0 && buf_off) {
            csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
                 sg_addr, sg_len);
            goto unaligned;
        }

        /* Except for the last buffer, all buffers must end on a page boundary */
        if ((i != (req->nsge - 1)) &&
            ((buf_off + sg_len) & (ddp_pagesz - 1))) {
            csio_dbg(hw,
                 "SGL addr not ending on page boundary (%llx:%d)\n",
                 sg_addr, sg_len);
            goto unaligned;
        }
    }

    /* SGLs are virtually contiguous. HW will DDP to SGLs */
    req->dcopy = 0;
    csio_scsi_read(req);

    return;

unaligned:
    CSIO_INC_STATS(scsim, n_unaligned);
    /*
     * For unaligned SGLs, the driver allocates internal DDP buffers.
     * Once the command completes, data is copied from the DDP buffers
     * to the SGLs.
     */
    req->dcopy = 1;

    /* Use gen_list to store the DDP buffers */
    INIT_LIST_HEAD(&req->gen_list);
    xfer_len = scsi_bufflen(scmnd);

    i = 0;
    /* Allocate ddp buffers for this request */
    while (alloc_len < xfer_len) {
        dma_buf = csio_get_scsi_ddp(scsim);
        if (dma_buf == NULL || i > scsim->max_sge) {
            req->drv_status = -EBUSY;
            break;
        }
        alloc_len += dma_buf->len;
        /* Added to IO req */
        list_add_tail(&dma_buf->list, &req->gen_list);
        i++;
    }

    if (!req->drv_status) {
        /* set number of ddp bufs used */
        req->nsge = i;
        csio_scsi_read(req);
        return;
    }

    /* release dma descs */
    if (i > 0)
        csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}

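/*
 * Example of the DDP alignment rule enforced above (ddp_pagesz = 4096):
 * an SGL of 0x...F800/2048, 0x...A000/4096, 0x...B000/1024 is DDP-able,
 * since only the first entry starts off a page boundary and only the
 * last ends off one. A middle entry of length 2048, by contrast, would
 * send the request down the dcopy path with internal DDP buffers.
 */
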
/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
               bool abort)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_rnode *rn = req->rnode;
    struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

    wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
    wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
                        FW_WR_LEN16_V(
                        DIV_ROUND_UP(size, 16)));

    wr->cookie = (uintptr_t) req;
    wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
    wr->tmo_val = (uint8_t) req->tmo;
    /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
    wr->sub_opcode_to_chk_all_io =
                (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
                 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
    wr->r3[0] = 0;
    wr->r3[1] = 0;
    wr->r3[2] = 0;
    wr->r3[3] = 0;
    /* Since we re-use the same ioreq for abort as well */
    wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
    struct csio_wr_pair wrp;
    struct csio_hw *hw = req->lnode->hwp;
    uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

    req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
    if (req->drv_status != 0)
        return;

    if (wrp.size1 >= size) {
        /* Initialize WR in one shot */
        csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
    } else {
        uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
        /*
         * Make a temporary copy of the WR and write back
         * the copy into the WR pair.
         */
        csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
        memcpy(wrp.addr1, tmpwr, wrp.size1);
        memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
    }
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);

    switch (evt) {
    case CSIO_SCSIE_START_IO:

        if (req->nsge) {
            if (req->datadir == DMA_TO_DEVICE) {
                req->dcopy = 0;
                csio_scsi_write(req);
            } else
                csio_setup_ddp(scsim, req);
        } else {
            csio_scsi_cmd(req);
        }

        if (likely(req->drv_status == 0)) {
            /* change state and enqueue on active_q */
            csio_set_state(&req->sm, csio_scsis_io_active);
            list_add_tail(&req->sm.sm_list, &scsim->active_q);
            csio_wr_issue(hw, req->eq_idx, false);
            CSIO_INC_STATS(scsim, n_active);

            return;
        }
        break;

    case CSIO_SCSIE_START_TM:
        csio_scsi_cmd(req);
        if (req->drv_status == 0) {
            /*
             * NOTE: We collect the affected I/Os prior to issuing
             * LUN reset, and not after it. This is to prevent
             * aborting I/Os that get issued after the LUN reset,
             * but prior to LUN reset completion (in the event that
             * the host stack has not blocked I/Os to a LUN that is
             * being reset).
             */
            csio_set_state(&req->sm, csio_scsis_tm_active);
            list_add_tail(&req->sm.sm_list, &scsim->active_q);
            csio_wr_issue(hw, req->eq_idx, false);
            CSIO_INC_STATS(scsim, n_tm_active);
        }
        return;

    case CSIO_SCSIE_ABORT:
    case CSIO_SCSIE_CLOSE:
        /*
         * NOTE:
         * We could get here due to:
         * - a window in the cleanup path of the SCSI module
         *   (csio_scsi_abort_io()). Please see the NOTE in that
         *   function.
         * - a window between the time we tried to issue an abort/close
         *   of a request to FW, and the FW completing the request
         *   itself.
         * Print a message for now, and return -EINVAL either way.
         */
        req->drv_status = -EINVAL;
        csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
        break;

    default:
        csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
        CSIO_DB_ASSERT(0);
    }
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scm = csio_hw_to_scsim(hw);
    struct csio_rnode *rn;

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        CSIO_DEC_STATS(scm, n_active);
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        /*
         * In MSIX mode, with multiple queues, the SCSI completions
         * could reach us sooner than the FW events sent to indicate
         * I-T nexus loss (link down, remote device logo etc.). We
         * don't want to be returning such I/Os to the upper layer
         * immediately, since we wouldn't have reported the I-T nexus
         * loss itself. This forces us to serialize such completions
         * with the reporting of the I-T nexus loss. Therefore, we
         * internally queue up such completions in the rnode.
         * The reporting of I-T nexus loss to the upper layer is then
         * followed by the returning of I/Os in this internal queue.
         * Having another state along with another queue helps us take
         * actions for events such as an ABORT received while we are
         * in this rnode queue.
         */
        if (unlikely(req->wr_status != FW_SUCCESS)) {
            rn = req->rnode;
            /*
             * FW says the remote device is lost, but the rnode
             * doesn't reflect it yet.
             */
            if (csio_scsi_itnexus_loss_error(req->wr_status) &&
                        csio_is_rnode_ready(rn)) {
                csio_set_state(&req->sm,
                        csio_scsis_shost_cmpl_await);
                list_add_tail(&req->sm.sm_list,
                          &rn->host_cmpl_q);
            }
        }

        break;

    case CSIO_SCSIE_ABORT:
        csio_scsi_abrt_cls(req, SCSI_ABORT);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_aborting);
        }
        break;

    case CSIO_SCSIE_CLOSE:
        csio_scsi_abrt_cls(req, SCSI_CLOSE);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_closing);
        }
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        CSIO_DEC_STATS(scm, n_active);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    default:
        csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
        CSIO_DB_ASSERT(0);
    }
}

static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scm = csio_hw_to_scsim(hw);

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        CSIO_DEC_STATS(scm, n_tm_active);
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);

        break;

    case CSIO_SCSIE_ABORT:
        csio_scsi_abrt_cls(req, SCSI_ABORT);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_aborting);
        }
        break;

    case CSIO_SCSIE_CLOSE:
        csio_scsi_abrt_cls(req, SCSI_CLOSE);
        if (req->drv_status == 0) {
            csio_wr_issue(hw, req->eq_idx, false);
            csio_set_state(&req->sm, csio_scsis_closing);
        }
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        CSIO_DEC_STATS(scm, n_tm_active);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    default:
        csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
        CSIO_DB_ASSERT(0);
    }
}

static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scm = csio_hw_to_scsim(hw);

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        csio_dbg(hw,
             "ioreq %p recvd cmpltd (wr_status:%d) "
             "in aborting st\n", req, req->wr_status);
        /*
         * Use -ECANCELED to explicitly tell the ABORTED event that
         * the original I/O was returned to the driver by FW.
         * We don't really care if the I/O was returned with success by
         * FW (because the ABORT and completion of the I/O crossed each
         * other), or any other return value. Once we are in the
         * aborting state, the success or failure of the I/O is
         * unimportant to us.
         */
        req->drv_status = -ECANCELED;
        break;

    case CSIO_SCSIE_ABORT:
        CSIO_INC_STATS(scm, n_abrt_dups);
        break;

    case CSIO_SCSIE_ABORTED:
        csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
             req, req->wr_status, req->drv_status);
        /*
         * Check if the original I/O WR completed before the Abort
         * completion.
         */
        if (req->drv_status != -ECANCELED) {
            csio_warn(hw,
                  "Abort completed before original I/O,"
                   " req:%p\n", req);
            CSIO_DB_ASSERT(0);
        }

        /*
         * There are the following possible scenarios:
         * 1. The abort completed successfully, FW returned FW_SUCCESS.
         * 2. The completion of an I/O and the receipt of
         *    abort for that I/O by the FW crossed each other.
         *    The FW returned FW_EINVAL. The original I/O would have
         *    returned with FW_SUCCESS or any other SCSI error.
         * 3. The FW couldn't send the abort out on the wire, as there
         *    was an I-T nexus loss (link down, remote device logged
         *    out etc.). FW sent back an appropriate I-T nexus loss
         *    status for the abort.
         * 4. FW sent an abort, but the abort timed out (remote device
         *    didn't respond). FW replied back with
         *    FW_SCSI_ABORT_TIMEDOUT.
         * 5. FW couldn't genuinely abort the request for some reason,
         *    and sent us an error.
         *
         * The first 3 scenarios are treated as successful abort
         * operations by the host, while the last 2 are failed attempts
         * to abort. Manipulate the return value of the request
         * appropriately, so that the host can convey these results
         * back to the upper layer.
         */
        if ((req->wr_status == FW_SUCCESS) ||
            (req->wr_status == FW_EINVAL) ||
            csio_scsi_itnexus_loss_error(req->wr_status))
            req->wr_status = FW_SCSI_ABORT_REQUESTED;

        CSIO_DEC_STATS(scm, n_active);
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        CSIO_DEC_STATS(scm, n_active);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_CLOSE:
        /*
         * We can receive this event from the module
         * cleanup paths, if the FW forgot to reply to the ABORT WR
         * and left this ioreq in this state. For now, just ignore
         * the event. The CLOSE event is sent to this state, as
         * the LINK may have already gone down.
         */
        break;

    default:
        csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
        CSIO_DB_ASSERT(0);
    }
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    struct csio_hw *hw = req->lnode->hwp;
    struct csio_scsim *scm = csio_hw_to_scsim(hw);

    switch (evt) {
    case CSIO_SCSIE_COMPLETED:
        csio_dbg(hw,
             "ioreq %p recvd cmpltd (wr_status:%d) "
             "in closing st\n", req, req->wr_status);
        /*
         * Use -ECANCELED to explicitly tell the CLOSED event that
         * the original I/O was returned to the driver by FW.
         * We don't really care if the I/O was returned with success by
         * FW (because the CLOSE and completion of the I/O crossed each
         * other), or any other return value. Once we are in the
         * closing state, the success or failure of the I/O is
         * unimportant to us.
         */
        req->drv_status = -ECANCELED;
        break;

    case CSIO_SCSIE_CLOSED:
        /*
         * Check if the original I/O WR completed before the Close
         * completion.
         */
        if (req->drv_status != -ECANCELED) {
            csio_fatal(hw,
                   "Close completed before original I/O,"
                   " req:%p\n", req);
            CSIO_DB_ASSERT(0);
        }

        /*
         * Either the close succeeded, or we issued the close to FW
         * at the same time the FW completed it to us. Either way,
         * the I/O is closed.
         */
        CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
                    (req->wr_status == FW_EINVAL));
        req->wr_status = FW_SCSI_CLOSE_REQUESTED;

        CSIO_DEC_STATS(scm, n_active);
        list_del_init(&req->sm.sm_list);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    case CSIO_SCSIE_CLOSE:
        break;

    case CSIO_SCSIE_DRVCLEANUP:
        req->wr_status = FW_HOSTERROR;
        CSIO_DEC_STATS(scm, n_active);
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;

    default:
        csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
        CSIO_DB_ASSERT(0);
    }
}

static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
    switch (evt) {
    case CSIO_SCSIE_ABORT:
    case CSIO_SCSIE_CLOSE:
        /*
         * Just succeed the abort request, and hope that
         * the remote device unregister path will cleanup
         * this I/O to the upper layer within a sane
         * amount of time.
         */
        /*
         * A close can come in during a LINK DOWN. The FW would have
         * returned us the I/O back, but not the remote device lost
         * FW event. In this interval, if the I/O times out at the upper
         * layer, a close can come in. Take the same action as abort:
         * return success, and hope that the remote device unregister
         * path will cleanup this I/O. If the FW still doesn't send
         * the msg, the close times out, and the upper layer resorts
         * to the next level of error recovery.
         */
        req->drv_status = 0;
        break;
    case CSIO_SCSIE_DRVCLEANUP:
        csio_set_state(&req->sm, csio_scsis_uninit);
        break;
    default:
        csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
             evt, req);
        CSIO_DB_ASSERT(0);
    }
}

/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private object
 * @scsiwr: Pointer to SCSI WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * header where the actual WR is present.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 * The ioreq corresponding to the WR is returned to the caller.
 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
 * no freelist buffer is expected.
 */
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
             struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
{
    struct csio_ioreq *ioreq = NULL;
    struct cpl_fw6_msg *cpl;
    uint8_t *tempwr;
    uint8_t status;
    struct csio_scsim *scm = csio_hw_to_scsim(hw);

    /* skip RSS header */
    cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

    if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
        csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
              cpl->opcode);
        CSIO_INC_STATS(scm, n_inval_cplop);
        return NULL;
    }

    tempwr = (uint8_t *)(cpl->data);
    status = csio_wr_status(tempwr);
    *scsiwr = tempwr;

    if (likely((*tempwr == FW_SCSI_READ_WR) ||
            (*tempwr == FW_SCSI_WRITE_WR) ||
            (*tempwr == FW_SCSI_CMD_WR))) {
        ioreq = (struct csio_ioreq *)((uintptr_t)
                 (((struct fw_scsi_read_wr *)tempwr)->cookie));
        CSIO_DB_ASSERT(virt_addr_valid(ioreq));

        ioreq->wr_status = status;

        return ioreq;
    }

    if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
        ioreq = (struct csio_ioreq *)((uintptr_t)
             (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
        CSIO_DB_ASSERT(virt_addr_valid(ioreq));

        ioreq->wr_status = status;
        return ioreq;
    }

    csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
    CSIO_INC_STATS(scm, n_inval_scsiop);
    return NULL;
}

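/*
 * For reference, a completion pulled off the SCSI IQ and parsed above is
 * laid out as:
 *
 *   [ 8-byte RSS header ][ struct cpl_fw6_msg ][ FW WR in cpl->data ]
 *
 * hence the sizeof(__be64) skip, the CPL_FW6_MSG opcode check, and the
 * recovery of the ioreq from the WR's cookie field, which was seeded
 * with the ioreq pointer when the WR was built.
 */
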
/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
    struct csio_hw *hw = scm->hw;
    struct csio_ioreq *ioreq;
    struct list_head *tmp, *next;
    struct scsi_cmnd *scmnd;

    /* Call back the completion routines of the active_q */
    list_for_each_safe(tmp, next, q) {
        ioreq = (struct csio_ioreq *)tmp;
        csio_scsi_drvcleanup(ioreq);
        list_del_init(&ioreq->sm.sm_list);
        scmnd = csio_scsi_cmnd(ioreq);
        spin_unlock_irq(&hw->lock);

        /*
         * Upper layers may have cleared this command, hence this
         * check to avoid accessing stale references.
         */
        if (scmnd != NULL)
            ioreq->io_cbfn(hw, ioreq);

        spin_lock_irq(&scm->freelist_lock);
        csio_put_scsi_ioreq(scm, ioreq);
        spin_unlock_irq(&scm->freelist_lock);

        spin_lock_irq(&hw->lock);
    }
}

#define CSIO_SCSI_ABORT_Q_POLL_MS       2000

static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
    struct csio_lnode *ln = ioreq->lnode;
    struct csio_hw *hw = ln->hwp;
    int ready = 0;
    struct csio_scsim *scsim = csio_hw_to_scsim(hw);
    int rv;

    if (csio_scsi_cmnd(ioreq) != scmnd) {
        CSIO_INC_STATS(scsim, n_abrt_race_comp);
        return;
    }

    ready = csio_is_lnode_ready(ln);

    rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
    if (rv != 0) {
        if (ready)
            CSIO_INC_STATS(scsim, n_abrt_busy_error);
        else
            CSIO_INC_STATS(scsim, n_cls_busy_error);
    }
}

/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on given queue, and wait for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * Lock has to be held across the loop that aborts I/Os, since dropping the lock
 * in between can cause the list to be corrupted. As a result, the caller
 * of this function has to ensure that the number of I/Os to be aborted
 * is finite enough to not cause lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
    struct csio_hw *hw = scm->hw;
    struct list_head *tmp, *next;
    int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
    struct scsi_cmnd *scmnd;

    if (list_empty(q))
        return 0;

    csio_dbg(hw, "Aborting SCSI I/Os\n");

    /* Now abort/close I/Os in the queue passed */
    list_for_each_safe(tmp, next, q) {
        scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
        csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
    }

    /* Wait till all active I/Os are completed/aborted/closed */
    while (!list_empty(q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
        spin_lock_irq(&hw->lock);
    }

    /* all aborts completed */
    if (list_empty(q))
        return 0;

    return -ETIMEDOUT;
}

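/*
 * Worked example: the cleanup paths below pass tmo = 30000, so with
 * CSIO_SCSI_ABORT_Q_POLL_MS = 2000 the wait loop above polls
 * DIV_ROUND_UP(30000, 2000) = 15 times, sleeping 2s per iteration with
 * the HW lock dropped, i.e. roughly a 30-second ceiling on abort
 * completion.
 */
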
/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
 * @scm: SCSI module.
 * @abort: abort required.
 * Called with lock held, should exit with lock held.
 * Can sleep when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
    struct csio_hw *hw = scm->hw;
    int rv = 0;
    int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

    /* No I/Os pending */
    if (list_empty(&scm->active_q))
        return 0;

    /* Wait until all active I/Os are completed */
    while (!list_empty(&scm->active_q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
        spin_lock_irq(&hw->lock);
    }

    /* all I/Os completed */
    if (list_empty(&scm->active_q))
        return 0;

    /* Else abort */
    if (abort) {
        rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
        if (rv == 0)
            return rv;
        csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
    }

    csio_scsi_cleanup_io_q(scm, &scm->active_q);

    CSIO_DB_ASSERT(list_empty(&scm->active_q));

    return rv;
}

/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
 * @scm: SCSI module.
 * @lnode: lnode
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
    struct csio_hw *hw = scm->hw;
    struct csio_scsi_level_data sld;
    int rv;
    int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

    csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);

    sld.level = CSIO_LEV_LNODE;
    sld.lnode = ln;
    INIT_LIST_HEAD(&ln->cmpl_q);
    csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

    /* No I/Os pending on this lnode */
    if (list_empty(&ln->cmpl_q))
        return 0;

    /* Wait until all active I/Os on this lnode are completed */
    while (!list_empty(&ln->cmpl_q) && count--) {
        spin_unlock_irq(&hw->lock);
        msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
        spin_lock_irq(&hw->lock);
    }

    /* all I/Os completed */
    if (list_empty(&ln->cmpl_q))
        return 0;

    csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);

    /* I/Os are pending, abort them */
    rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
    if (rv != 0) {
        csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
        csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
    }

    CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

    return rv;
}

static ssize_t
csio_show_hw_state(struct device *dev,
           struct device_attribute *attr, char *buf)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));
    struct csio_hw *hw = csio_lnode_to_hw(ln);

    if (csio_is_hw_ready(hw))
        return snprintf(buf, PAGE_SIZE, "ready\n");
    else
        return snprintf(buf, PAGE_SIZE, "not ready\n");
}

/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
           struct device_attribute *attr, const char *buf, size_t count)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));
    struct csio_hw *hw = csio_lnode_to_hw(ln);

    if (*buf != '1')
        return -EINVAL;

    /* Delete NPIV lnodes */
    csio_lnodes_exit(hw, 1);

    /* Block upper IOs */
    csio_lnodes_block_request(hw);

    spin_lock_irq(&hw->lock);
    csio_hw_reset(hw);
    spin_unlock_irq(&hw->lock);

    /* Unblock upper IOs */
    csio_lnodes_unblock_request(hw);
    return count;
}

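/*
 * Typical usage from userspace (the host number is illustrative; the
 * attribute appears under the Scsi_Host sysfs directory):
 *
 *   echo 1 > /sys/class/scsi_host/host5/device_reset
 *
 * Any value other than '1' is rejected with -EINVAL.
 */
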
/* disable port */
static ssize_t
csio_disable_port(struct device *dev,
           struct device_attribute *attr, const char *buf, size_t count)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    bool disable;

    if (*buf == '1' || *buf == '0')
        disable = (*buf == '1') ? true : false;
    else
        return -EINVAL;

    /* Block upper IOs */
    csio_lnodes_block_by_port(hw, ln->portid);

    spin_lock_irq(&hw->lock);
    csio_disable_lnodes(hw, ln->portid, disable);
    spin_unlock_irq(&hw->lock);

    /* Unblock upper IOs */
    csio_lnodes_unblock_by_port(hw, ln->portid);
    return count;
}

/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
           struct device_attribute *attr, char *buf)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));

    return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}

/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
           struct device_attribute *attr, const char *buf, size_t count)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));
    struct csio_hw *hw = csio_lnode_to_hw(ln);
    uint32_t dbg_level = 0;

    if (!isdigit(buf[0]))
        return -EINVAL;

    /* sscanf() returns the number of conversions; 1 means success here */
    if (sscanf(buf, "%i", &dbg_level) != 1)
        return -EINVAL;

    ln->params.log_level = dbg_level;
    hw->params.log_level = dbg_level;

    /* A sysfs store must return the number of bytes consumed */
    return count;
}

static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
          csio_store_dbg_level);

static struct attribute *csio_fcoe_lport_attrs[] = {
    &dev_attr_hw_state.attr,
    &dev_attr_device_reset.attr,
    &dev_attr_disable_port.attr,
    &dev_attr_dbg_level.attr,
    NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_lport);

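/*
 * The lport attributes above surface in the Scsi_Host sysfs directory,
 * e.g. (paths illustrative):
 *
 *   cat /sys/class/scsi_host/host5/hw_state
 *   echo 0x1f > /sys/class/scsi_host/host5/dbg_level
 *   echo 1 > /sys/class/scsi_host/host5/disable_port
 */
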
static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
             struct device_attribute *attr, char *buf)
{
    struct csio_lnode *ln = shost_priv(class_to_shost(dev));

    return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}

static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);

static struct attribute *csio_fcoe_vport_attrs[] = {
    &dev_attr_num_reg_rnodes.attr,
    &dev_attr_dbg_level.attr,
    NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_vport);

1492 static inline uint32_t
1493 csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
1494 {
1495     struct scsi_cmnd *scmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1496     struct scatterlist *sg;
1497     uint32_t bytes_left;
1498     uint32_t bytes_copy;
1499     uint32_t buf_off = 0;
1500     uint32_t start_off = 0;
1501     uint32_t sg_off = 0;
1502     void *sg_addr;
1503     void *buf_addr;
1504     struct csio_dma_buf *dma_buf;
1505 
1506     bytes_left = scsi_bufflen(scmnd);
1507     sg = scsi_sglist(scmnd);
1508     dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
1509 
1510     /* Copy data from driver buffer to SGs of SCSI CMD */
1511     while (bytes_left > 0 && sg && dma_buf) {
1512         if (buf_off >= dma_buf->len) {
1513             buf_off = 0;
1514             dma_buf = (struct csio_dma_buf *)
1515                     csio_list_next(dma_buf);
1516             continue;
1517         }
1518 
1519         if (start_off >= sg->length) {
1520             start_off -= sg->length;
1521             sg = sg_next(sg);
1522             continue;
1523         }
1524 
1525         buf_addr = dma_buf->vaddr + buf_off;
1526         sg_off = sg->offset + start_off;
1527         bytes_copy = min((dma_buf->len - buf_off),
1528                 sg->length - start_off);
1529         bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
1530                  bytes_copy);
1531 
1532         sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
1533         if (!sg_addr) {
1534             csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
1535                 sg, req);
1536             break;
1537         }
1538 
1539         csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
1540                 sg_addr, sg_off, buf_addr, bytes_copy);
1541         memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
1542         kunmap_atomic(sg_addr);
1543 
1544         start_off +=  bytes_copy;
1545         buf_off += bytes_copy;
1546         bytes_left -= bytes_copy;
1547     }
1548 
1549     if (bytes_left > 0)
1550         return DID_ERROR;
1551     else
1552         return DID_OK;
1553 }
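
/*
 * Worked example of the chunking in the loop above (hypothetical
 * numbers, assuming PAGE_SIZE = 4096): with dma_buf->len - buf_off =
 * 6000, sg->length - start_off = 5000 and sg_off = 3000, the first
 * min() picks 5000 (the SG entry is the limiter) and the second
 * clamps that to PAGE_SIZE - (sg_off & ~PAGE_MASK) = 4096 - 3000 =
 * 1096, so only 1096 bytes are copied under this kmap_atomic()
 * mapping and the loop iterates again for the remainder.
 */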
1554 
1555 /*
1556  * csio_scsi_err_handler - SCSI error handler.
1557  * @hw: HW module.
1558  * @req: IO request.
1559  *
1560  */
1561 static inline void
1562 csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
1563 {
1564     struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1565     struct csio_scsim *scm = csio_hw_to_scsim(hw);
1566     struct fcp_resp_with_ext *fcp_resp;
1567     struct fcp_resp_rsp_info *rsp_info;
1568     struct csio_dma_buf *dma_buf;
1569     uint8_t flags, scsi_status = 0;
1570     uint32_t host_status = DID_OK;
1571     uint32_t rsp_len = 0, sns_len = 0;
1572     struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1573 
1574 
1575     switch (req->wr_status) {
1576     case FW_HOSTERROR:
1577         if (unlikely(!csio_is_hw_ready(hw)))
1578             return;
1579 
1580         host_status = DID_ERROR;
1581         CSIO_INC_STATS(scm, n_hosterror);
1582 
1583         break;
1584     case FW_SCSI_RSP_ERR:
1585         dma_buf = &req->dma_buf;
1586         fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
1587         rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
1588         flags = fcp_resp->resp.fr_flags;
1589         scsi_status = fcp_resp->resp.fr_status;
1590 
1591         if (flags & FCP_RSP_LEN_VAL) {
1592             rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
1593             if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
1594                 (rsp_info->rsp_code != FCP_TMF_CMPL)) {
1595                 host_status = DID_ERROR;
1596                 goto out;
1597             }
1598         }
1599 
1600         if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
1601             sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
1602             if (sns_len > SCSI_SENSE_BUFFERSIZE)
1603                 sns_len = SCSI_SENSE_BUFFERSIZE;
1604 
1605             memcpy(cmnd->sense_buffer,
1606                    &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
1607             CSIO_INC_STATS(scm, n_autosense);
1608         }
1609 
1610         scsi_set_resid(cmnd, 0);
1611 
1612         /* Under run */
1613         if (flags & FCP_RESID_UNDER) {
1614             scsi_set_resid(cmnd,
1615                        be32_to_cpu(fcp_resp->ext.fr_resid));
1616 
1617             if (!(flags & FCP_SNS_LEN_VAL) &&
1618                 (scsi_status == SAM_STAT_GOOD) &&
1619                 ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
1620                             < cmnd->underflow))
1621                 host_status = DID_ERROR;
1622         } else if (flags & FCP_RESID_OVER)
1623             host_status = DID_ERROR;
1624 
1625         CSIO_INC_STATS(scm, n_rsperror);
1626         break;
1627 
1628     case FW_SCSI_OVER_FLOW_ERR:
        csio_warn(hw,
              "Overflow error, cmnd:0x%x, expected len:0x%x,"
              " resid:0x%x\n", cmnd->cmnd[0],
              scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1633         host_status = DID_ERROR;
1634         CSIO_INC_STATS(scm, n_ovflerror);
1635         break;
1636 
1637     case FW_SCSI_UNDER_FLOW_ERR:
        csio_warn(hw,
              "Underflow error, cmnd:0x%x, expected"
              " len:0x%x, resid:0x%x, lun:0x%llx, ssn:0x%x\n",
              cmnd->cmnd[0], scsi_bufflen(cmnd),
              scsi_get_resid(cmnd), cmnd->device->lun,
              rn->flowid);
1644         host_status = DID_ERROR;
1645         CSIO_INC_STATS(scm, n_unflerror);
1646         break;
1647 
1648     case FW_SCSI_ABORT_REQUESTED:
1649     case FW_SCSI_ABORTED:
1650     case FW_SCSI_CLOSE_REQUESTED:
1651         csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
1652                  cmnd->cmnd[0],
1653                 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
1654                 "closed" : "aborted");
1655         /*
1656          * csio_eh_abort_handler checks this value to
1657          * succeed or fail the abort request.
1658          */
1659         host_status = DID_REQUEUE;
1660         if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
1661             CSIO_INC_STATS(scm, n_closed);
1662         else
1663             CSIO_INC_STATS(scm, n_aborted);
1664         break;
1665 
1666     case FW_SCSI_ABORT_TIMEDOUT:
1667         /* FW timed out the abort itself */
1668         csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
1669              req, cmnd, req->wr_status);
1670         host_status = DID_ERROR;
1671         CSIO_INC_STATS(scm, n_abrt_timedout);
1672         break;
1673 
1674     case FW_RDEV_NOT_READY:
        /*
         * In firmware, an RDEV can enter this state temporarily,
         * before moving into the disappeared/lost state. The driver
         * should therefore complete the request as if the device
         * had disappeared.
         */
1681         CSIO_INC_STATS(scm, n_rdev_nr_error);
1682         host_status = DID_ERROR;
1683         break;
1684 
1685     case FW_ERR_RDEV_LOST:
1686         CSIO_INC_STATS(scm, n_rdev_lost_error);
1687         host_status = DID_ERROR;
1688         break;
1689 
1690     case FW_ERR_RDEV_LOGO:
1691         CSIO_INC_STATS(scm, n_rdev_logo_error);
1692         host_status = DID_ERROR;
1693         break;
1694 
1695     case FW_ERR_RDEV_IMPL_LOGO:
1696         host_status = DID_ERROR;
1697         break;
1698 
1699     case FW_ERR_LINK_DOWN:
1700         CSIO_INC_STATS(scm, n_link_down_error);
1701         host_status = DID_ERROR;
1702         break;
1703 
1704     case FW_FCOE_NO_XCHG:
1705         CSIO_INC_STATS(scm, n_no_xchg_error);
1706         host_status = DID_ERROR;
1707         break;
1708 
1709     default:
1710         csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
1711                 req->wr_status, req, cmnd);
1712         CSIO_DB_ASSERT(0);
1713 
1714         CSIO_INC_STATS(scm, n_unknown_error);
1715         host_status = DID_ERROR;
1716         break;
1717     }
1718 
1719 out:
1720     if (req->nsge > 0) {
1721         scsi_dma_unmap(cmnd);
1722         if (req->dcopy && (host_status == DID_OK))
1723             host_status = csio_scsi_copy_to_sgl(hw, req);
1724     }
1725 
1726     cmnd->result = (((host_status) << 16) | scsi_status);
1727     scsi_done(cmnd);
1728 
1729     /* Wake up waiting threads */
1730     csio_scsi_cmnd(req) = NULL;
1731     complete(&req->cmplobj);
1732 }
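
/*
 * For reference, the FCP response parsed in the FW_SCSI_RSP_ERR case
 * above is laid out (per the definitions in scsi/fc/fc_fcp.h) as:
 *
 *   struct fcp_resp_with_ext  - flags, SCSI status, residual, lengths
 *   response info             - rsp_len bytes, carrying rsp_code
 *   sense data                - sns_len bytes, immediately after the
 *                               response info
 *
 * which is why the sense copy above starts at
 * &rsp_info->_fr_resvd[0] + rsp_len.
 */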
1733 
1734 /*
1735  * csio_scsi_cbfn - SCSI callback function.
1736  * @hw: HW module.
1737  * @req: IO request.
1738  *
1739  */
1740 static void
1741 csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
1742 {
1743     struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
1744     uint8_t scsi_status = SAM_STAT_GOOD;
1745     uint32_t host_status = DID_OK;
1746 
1747     if (likely(req->wr_status == FW_SUCCESS)) {
1748         if (req->nsge > 0) {
1749             scsi_dma_unmap(cmnd);
1750             if (req->dcopy)
1751                 host_status = csio_scsi_copy_to_sgl(hw, req);
1752         }
1753 
1754         cmnd->result = (((host_status) << 16) | scsi_status);
1755         scsi_done(cmnd);
1756         csio_scsi_cmnd(req) = NULL;
1757         CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
1758     } else {
1759         /* Error handling */
1760         csio_scsi_err_handler(hw, req);
1761     }
1762 }
1763 
1764 /**
1765  * csio_queuecommand - Entry point to kickstart an I/O request.
1766  * @host:   The scsi_host pointer.
 * @cmnd:   The I/O request from the SCSI midlayer (ML).
1768  *
1769  * This routine does the following:
1770  *  - Checks for HW and Rnode module readiness.
1771  *  - Gets a free ioreq structure (which is already initialized
1772  *    to uninit during its allocation).
1773  *  - Maps SG elements.
1774  *  - Initializes ioreq members.
1775  *  - Kicks off the SCSI state machine for this IO.
1776  *  - Returns busy status on error.
1777  */
1778 static int
1779 csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
1780 {
1781     struct csio_lnode *ln = shost_priv(host);
1782     struct csio_hw *hw = csio_lnode_to_hw(ln);
1783     struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1784     struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1785     struct csio_ioreq *ioreq = NULL;
1786     unsigned long flags;
1787     int nsge = 0;
1788     int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
1789     int retval;
1790     struct csio_scsi_qset *sqset;
1791     struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1792 
1793     sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
1794 
1795     nr = fc_remote_port_chkready(rport);
1796     if (nr) {
1797         cmnd->result = nr;
1798         CSIO_INC_STATS(scsim, n_rn_nr_error);
1799         goto err_done;
1800     }
1801 
1802     if (unlikely(!csio_is_hw_ready(hw))) {
1803         cmnd->result = (DID_REQUEUE << 16);
1804         CSIO_INC_STATS(scsim, n_hw_nr_error);
1805         goto err_done;
1806     }
1807 
1808     /* Get req->nsge, if there are SG elements to be mapped  */
1809     nsge = scsi_dma_map(cmnd);
1810     if (unlikely(nsge < 0)) {
1811         CSIO_INC_STATS(scsim, n_dmamap_error);
1812         goto err;
1813     }
1814 
1815     /* Do we support so many mappings? */
1816     if (unlikely(nsge > scsim->max_sge)) {
1817         csio_warn(hw,
1818               "More SGEs than can be supported."
1819               " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
1820         CSIO_INC_STATS(scsim, n_unsupp_sge_error);
1821         goto err_dma_unmap;
1822     }
1823 
1824     /* Get a free ioreq structure - SM is already set to uninit */
1825     ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
1826     if (!ioreq) {
1827         csio_err(hw, "Out of I/O request elements. Active #:%d\n",
1828              scsim->stats.n_active);
1829         CSIO_INC_STATS(scsim, n_no_req_error);
1830         goto err_dma_unmap;
1831     }
1832 
1833     ioreq->nsge     = nsge;
1834     ioreq->lnode        = ln;
1835     ioreq->rnode        = rn;
1836     ioreq->iq_idx       = sqset->iq_idx;
1837     ioreq->eq_idx       = sqset->eq_idx;
1838     ioreq->wr_status    = 0;
1839     ioreq->drv_status   = 0;
1840     csio_scsi_cmnd(ioreq)   = (void *)cmnd;
1841     ioreq->tmo      = 0;
1842     ioreq->datadir      = cmnd->sc_data_direction;
1843 
1844     if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
1845         CSIO_INC_STATS(ln, n_output_requests);
1846         ln->stats.n_output_bytes += scsi_bufflen(cmnd);
1847     } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
1848         CSIO_INC_STATS(ln, n_input_requests);
1849         ln->stats.n_input_bytes += scsi_bufflen(cmnd);
1850     } else
1851         CSIO_INC_STATS(ln, n_control_requests);
1852 
1853     /* Set cbfn */
1854     ioreq->io_cbfn = csio_scsi_cbfn;
1855 
1856     /* Needed during abort */
1857     cmnd->host_scribble = (unsigned char *)ioreq;
1858     csio_priv(cmnd)->fc_tm_flags = 0;
1859 
1860     /* Kick off SCSI IO SM on the ioreq */
1861     spin_lock_irqsave(&hw->lock, flags);
1862     retval = csio_scsi_start_io(ioreq);
1863     spin_unlock_irqrestore(&hw->lock, flags);
1864 
1865     if (retval != 0) {
1866         csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
1867              ioreq, retval);
1868         CSIO_INC_STATS(scsim, n_busy_error);
1869         goto err_put_req;
1870     }
1871 
1872     return 0;
1873 
1874 err_put_req:
1875     csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
1876 err_dma_unmap:
1877     if (nsge > 0)
1878         scsi_dma_unmap(cmnd);
1879 err:
1880     return rv;
1881 
1882 err_done:
1883     scsi_done(cmnd);
1884     return 0;
1885 }
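
/*
 * Note on the return paths above: returning 0 tells the midlayer the
 * command was accepted (its completion arrives later via scsi_done()),
 * returning SCSI_MLQUEUE_HOST_BUSY asks the midlayer to retry it
 * later, and the err_done path completes the command immediately with
 * the result already set (rport or HW not ready).
 */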
1886 
1887 static int
1888 csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
1889 {
1890     int rv;
1891     int cpu = smp_processor_id();
1892     struct csio_lnode *ln = ioreq->lnode;
1893     struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
1894 
1895     ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
1896     /*
1897      * Use current processor queue for posting the abort/close, but retain
1898      * the ingress queue ID of the original I/O being aborted/closed - we
1899      * need the abort/close completion to be received on the same queue
1900      * as the original I/O.
1901      */
1902     ioreq->eq_idx = sqset->eq_idx;
1903 
1904     if (abort == SCSI_ABORT)
1905         rv = csio_scsi_abort(ioreq);
1906     else
1907         rv = csio_scsi_close(ioreq);
1908 
1909     return rv;
1910 }
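
/*
 * Illustration of the queue choice above (hypothetical IDs): if the
 * original I/O was issued from CPU 2 (eq_idx 2, iq_idx 2) and the
 * abort runs on CPU 0, the abort WR goes out on CPU 0's egress queue
 * (eq_idx 0) while ioreq->iq_idx stays 2, so the abort completion
 * arrives on the same ingress queue as the original I/O's completion.
 */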
1911 
1912 static int
1913 csio_eh_abort_handler(struct scsi_cmnd *cmnd)
1914 {
1915     struct csio_ioreq *ioreq;
1916     struct csio_lnode *ln = shost_priv(cmnd->device->host);
1917     struct csio_hw *hw = csio_lnode_to_hw(ln);
1918     struct csio_scsim *scsim = csio_hw_to_scsim(hw);
1919     int ready = 0, ret;
1920     unsigned long tmo = 0;
1921     int rv;
1922     struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
1923 
1924     ret = fc_block_scsi_eh(cmnd);
1925     if (ret)
1926         return ret;
1927 
1928     ioreq = (struct csio_ioreq *)cmnd->host_scribble;
1929     if (!ioreq)
1930         return SUCCESS;
1931 
1932     if (!rn)
1933         return FAILED;
1934 
1935     csio_dbg(hw,
1936          "Request to abort ioreq:%p cmd:%p cdb:%08llx"
1937          " ssni:0x%x lun:%llu iq:0x%x\n",
1938         ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
1939         cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
1940 
1941     if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
1942         CSIO_INC_STATS(scsim, n_abrt_race_comp);
1943         return SUCCESS;
1944     }
1945 
1946     ready = csio_is_lnode_ready(ln);
1947     tmo = CSIO_SCSI_ABRT_TMO_MS;
1948 
1949     reinit_completion(&ioreq->cmplobj);
1950     spin_lock_irq(&hw->lock);
1951     rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1952     spin_unlock_irq(&hw->lock);
1953 
1954     if (rv != 0) {
1955         if (rv == -EINVAL) {
1956             /* Return success, if abort/close request issued on
1957              * already completed IO
1958              */
1959             return SUCCESS;
1960         }
1961         if (ready)
1962             CSIO_INC_STATS(scsim, n_abrt_busy_error);
1963         else
1964             CSIO_INC_STATS(scsim, n_cls_busy_error);
1965 
1966         goto inval_scmnd;
1967     }
1968 
1969     wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
1970 
    /* FW didn't respond to the abort within our timeout */
1972     if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
1973 
1974         csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
1975         CSIO_INC_STATS(scsim, n_abrt_timedout);
1976 
1977 inval_scmnd:
1978         if (ioreq->nsge > 0)
1979             scsi_dma_unmap(cmnd);
1980 
1981         spin_lock_irq(&hw->lock);
1982         csio_scsi_cmnd(ioreq) = NULL;
1983         spin_unlock_irq(&hw->lock);
1984 
1985         cmnd->result = (DID_ERROR << 16);
1986         scsi_done(cmnd);
1987 
1988         return FAILED;
1989     }
1990 
1991     /* FW successfully aborted the request */
1992     if (host_byte(cmnd->result) == DID_REQUEUE) {
1993         csio_info(hw,
1994             "Aborted SCSI command to (%d:%llu) tag %u\n",
1995             cmnd->device->id, cmnd->device->lun,
1996             scsi_cmd_to_rq(cmnd)->tag);
1997         return SUCCESS;
1998     } else {
1999         csio_info(hw,
2000             "Failed to abort SCSI command, (%d:%llu) tag %u\n",
2001             cmnd->device->id, cmnd->device->lun,
2002             scsi_cmd_to_rq(cmnd)->tag);
2003         return FAILED;
2004     }
2005 }
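
/*
 * Summary of the outcomes above: an ioreq that has already completed
 * (or raced with completion) returns SUCCESS straight away; an abort
 * the FW never answered is cleaned up locally and returns FAILED; and
 * once the FW does answer, DID_REQUEUE (set by csio_scsi_err_handler())
 * is what distinguishes a successful abort/close from a failed one.
 */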
2006 
2007 /*
2008  * csio_tm_cbfn - TM callback function.
2009  * @hw: HW module.
2010  * @req: IO request.
2011  *
2012  * Cache the result in 'cmnd', since ioreq will be freed soon
 * after we return from here, and the waiting thread shouldn't trust
2014  * the ioreq contents.
2015  */
2016 static void
2017 csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
2018 {
2019     struct scsi_cmnd *cmnd  = (struct scsi_cmnd *)csio_scsi_cmnd(req);
2020     struct csio_dma_buf *dma_buf;
2021     uint8_t flags = 0;
2022     struct fcp_resp_with_ext *fcp_resp;
2023     struct fcp_resp_rsp_info *rsp_info;
2024 
2025     csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
2026               req, req->wr_status);
2027 
2028     /* Cache FW return status */
2029     csio_priv(cmnd)->wr_status = req->wr_status;
2030 
2031     /* Special handling based on FCP response */
2032 
    /*
     * FW returns this error when the FCP flags are set. FCP-4 says
     * FCP_RSP_LEN_VAL shall be set in the flags for TM completions,
     * so if a target sets this bit, we expect rsp_code to be
     * FCP_TMF_CMPL for a successful TM completion. Any other
     * rsp_code means the TM operation failed. If a target simply
     * never sets the flags, we treat the TM operation as a success,
     * and FW returns FW_SUCCESS.
     */
2042     if (req->wr_status == FW_SCSI_RSP_ERR) {
2043         dma_buf = &req->dma_buf;
2044         fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
2045         rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
2046 
2047         flags = fcp_resp->resp.fr_flags;
2048 
2049         /* Modify return status if flags indicate success */
2050         if (flags & FCP_RSP_LEN_VAL)
2051             if (rsp_info->rsp_code == FCP_TMF_CMPL)
2052                 csio_priv(cmnd)->wr_status = FW_SUCCESS;
2053 
2054         csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
2055     }
2056 
2057     /* Wake up the TM handler thread */
2058     csio_scsi_cmnd(req) = NULL;
2059 }
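
/*
 * The resulting cached TM status, as decided above:
 *
 *   FW_SUCCESS                                     -> TM succeeded
 *   FW_SCSI_RSP_ERR with FCP_RSP_LEN_VAL set and
 *   rsp_code == FCP_TMF_CMPL                       -> TM succeeded
 *   FW_SCSI_RSP_ERR with any other flags/rsp_code  -> TM failed
 *
 * csio_eh_lun_reset_handler() below checks this cached value against
 * FW_SUCCESS once its wait completes.
 */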
2060 
2061 static int
2062 csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
2063 {
2064     struct csio_lnode *ln = shost_priv(cmnd->device->host);
2065     struct csio_hw *hw = csio_lnode_to_hw(ln);
2066     struct csio_scsim *scsim = csio_hw_to_scsim(hw);
2067     struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
2068     struct csio_ioreq *ioreq = NULL;
2069     struct csio_scsi_qset *sqset;
2070     unsigned long flags;
2071     int retval;
2072     int count, ret;
2073     LIST_HEAD(local_q);
2074     struct csio_scsi_level_data sld;
2075 
2076     if (!rn)
2077         goto fail;
2078 
2079     csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
2080               cmnd->device->lun, rn->flowid, rn->scsi_id);
2081 
2082     if (!csio_is_lnode_ready(ln)) {
2083         csio_err(hw,
2084              "LUN reset cannot be issued on non-ready"
2085              " local node vnpi:0x%x (LUN:%llu)\n",
2086              ln->vnp_flowid, cmnd->device->lun);
2087         goto fail;
2088     }
2089 
2090     /* Lnode is ready, now wait on rport node readiness */
2091     ret = fc_block_scsi_eh(cmnd);
2092     if (ret)
2093         return ret;
2094 
2095     /*
2096      * If we have blocked in the previous call, at this point, either the
2097      * remote node has come back online, or device loss timer has fired
2098      * and the remote node is destroyed. Allow the LUN reset only for
2099      * the former case, since LUN reset is a TMF I/O on the wire, and we
2100      * need a valid session to issue it.
2101      */
2102     if (fc_remote_port_chkready(rn->rport)) {
2103         csio_err(hw,
2104              "LUN reset cannot be issued on non-ready"
2105              " remote node ssni:0x%x (LUN:%llu)\n",
2106              rn->flowid, cmnd->device->lun);
2107         goto fail;
2108     }
2109 
2110     /* Get a free ioreq structure - SM is already set to uninit */
2111     ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
2112 
2113     if (!ioreq) {
2114         csio_err(hw, "Out of IO request elements. Active # :%d\n",
2115              scsim->stats.n_active);
2116         goto fail;
2117     }
2118 
2119     sqset           = &hw->sqset[ln->portid][smp_processor_id()];
2120     ioreq->nsge     = 0;
2121     ioreq->lnode        = ln;
2122     ioreq->rnode        = rn;
2123     ioreq->iq_idx       = sqset->iq_idx;
2124     ioreq->eq_idx       = sqset->eq_idx;
2125 
2126     csio_scsi_cmnd(ioreq)   = cmnd;
2127     cmnd->host_scribble = (unsigned char *)ioreq;
2128     csio_priv(cmnd)->wr_status = 0;
2129 
2130     csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
2131     ioreq->tmo      = CSIO_SCSI_LUNRST_TMO_MS / 1000;
2132 
    /*
     * FW times the LUN reset for ioreq->tmo, so we have to wait a
     * little longer (10s for now) than that, to allow FW to return
     * the timed-out command.
     */
2138     count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
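
    /*
     * Worked example (assuming, for illustration, that
     * CSIO_SCSI_LUNRST_TMO_MS is 30000 and CSIO_SCSI_TM_POLL_MS is
     * 2000; both are driver constants defined elsewhere): tmo = 30s,
     * so count = DIV_ROUND_UP(40 * 1000, 2000) = 20 polls of 2s each,
     * i.e. a 40s ceiling on the wait loop below.
     */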
2139 
2140     /* Set cbfn */
2141     ioreq->io_cbfn = csio_tm_cbfn;
2142 
    /* Save off the ioreq info for later use */
2144     sld.level = CSIO_LEV_LUN;
2145     sld.lnode = ioreq->lnode;
2146     sld.rnode = ioreq->rnode;
2147     sld.oslun = cmnd->device->lun;
2148 
2149     spin_lock_irqsave(&hw->lock, flags);
2150     /* Kick off TM SM on the ioreq */
2151     retval = csio_scsi_start_tm(ioreq);
2152     spin_unlock_irqrestore(&hw->lock, flags);
2153 
2154     if (retval != 0) {
2155         csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
2156                 ioreq, retval);
2157         goto fail_ret_ioreq;
2158     }
2159 
2160     csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
2161             count * (CSIO_SCSI_TM_POLL_MS / 1000));
2162     /* Wait for completion */
2163     while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
2164                                 && count--)
2165         msleep(CSIO_SCSI_TM_POLL_MS);
2166 
    /* LUN reset timed out */
2168     if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
2169         csio_err(hw, "LUN reset (%d:%llu) timed out\n",
2170              cmnd->device->id, cmnd->device->lun);
2171 
2172         spin_lock_irq(&hw->lock);
2173         csio_scsi_drvcleanup(ioreq);
2174         list_del_init(&ioreq->sm.sm_list);
2175         spin_unlock_irq(&hw->lock);
2176 
2177         goto fail_ret_ioreq;
2178     }
2179 
2180     /* LUN reset returned, check cached status */
2181     if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
2182         csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
2183              cmnd->device->id, cmnd->device->lun,
2184              csio_priv(cmnd)->wr_status);
2185         goto fail;
2186     }
2187 
    /*
     * LUN reset succeeded; start aborting the affected I/Os. Since
     * the host guarantees that there will be no more I/Os to this
     * LUN until the LUN reset completes, we can gather the pending
     * I/Os after the LUN reset.
     */
2194     spin_lock_irq(&hw->lock);
2195     csio_scsi_gather_active_ios(scsim, &sld, &local_q);
2196 
2197     retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
2198     spin_unlock_irq(&hw->lock);
2199 
2200     /* Aborts may have timed out */
2201     if (retval != 0) {
2202         csio_err(hw,
2203              "Attempt to abort I/Os during LUN reset of %llu"
2204              " returned %d\n", cmnd->device->lun, retval);
2205         /* Return I/Os back to active_q */
2206         spin_lock_irq(&hw->lock);
2207         list_splice_tail_init(&local_q, &scsim->active_q);
2208         spin_unlock_irq(&hw->lock);
2209         goto fail;
2210     }
2211 
2212     CSIO_INC_STATS(rn, n_lun_rst);
2213 
2214     csio_info(hw, "LUN reset occurred (%d:%llu)\n",
2215           cmnd->device->id, cmnd->device->lun);
2216 
2217     return SUCCESS;
2218 
2219 fail_ret_ioreq:
2220     csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
2221 fail:
2222     CSIO_INC_STATS(rn, n_lun_rst_fail);
2223     return FAILED;
2224 }
2225 
2226 static int
2227 csio_slave_alloc(struct scsi_device *sdev)
2228 {
2229     struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2230 
2231     if (!rport || fc_remote_port_chkready(rport))
2232         return -ENXIO;
2233 
2234     sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
2235 
2236     return 0;
2237 }
2238 
2239 static int
2240 csio_slave_configure(struct scsi_device *sdev)
2241 {
2242     scsi_change_queue_depth(sdev, csio_lun_qdepth);
2243     return 0;
2244 }
2245 
2246 static void
2247 csio_slave_destroy(struct scsi_device *sdev)
2248 {
2249     sdev->hostdata = NULL;
2250 }
2251 
2252 static int
2253 csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
2254 {
2255     struct csio_lnode *ln = shost_priv(shost);
2256     int rv = 1;
2257 
2258     spin_lock_irq(shost->host_lock);
2259     if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
2260         goto out;
2261 
2262     rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
2263                 csio_delta_scan_tmo * HZ);
2264 out:
2265     spin_unlock_irq(shost->host_lock);
2266 
2267     return rv;
2268 }
2269 
2270 struct scsi_host_template csio_fcoe_shost_template = {
2271     .module         = THIS_MODULE,
2272     .name           = CSIO_DRV_DESC,
2273     .proc_name      = KBUILD_MODNAME,
2274     .queuecommand       = csio_queuecommand,
2275     .cmd_size       = sizeof(struct csio_cmd_priv),
2276     .eh_timed_out       = fc_eh_timed_out,
2277     .eh_abort_handler   = csio_eh_abort_handler,
2278     .eh_device_reset_handler = csio_eh_lun_reset_handler,
2279     .slave_alloc        = csio_slave_alloc,
2280     .slave_configure    = csio_slave_configure,
2281     .slave_destroy      = csio_slave_destroy,
2282     .scan_finished      = csio_scan_finished,
2283     .this_id        = -1,
2284     .sg_tablesize       = CSIO_SCSI_MAX_SGE,
2285     .cmd_per_lun        = CSIO_MAX_CMD_PER_LUN,
2286     .shost_groups       = csio_fcoe_lport_groups,
2287     .max_sectors        = CSIO_MAX_SECTOR_SIZE,
2288 };
2289 
2290 struct scsi_host_template csio_fcoe_shost_vport_template = {
2291     .module         = THIS_MODULE,
2292     .name           = CSIO_DRV_DESC,
2293     .proc_name      = KBUILD_MODNAME,
    .queuecommand       = csio_queuecommand,
    .cmd_size       = sizeof(struct csio_cmd_priv),
2295     .eh_timed_out       = fc_eh_timed_out,
2296     .eh_abort_handler   = csio_eh_abort_handler,
2297     .eh_device_reset_handler = csio_eh_lun_reset_handler,
2298     .slave_alloc        = csio_slave_alloc,
2299     .slave_configure    = csio_slave_configure,
2300     .slave_destroy      = csio_slave_destroy,
2301     .scan_finished      = csio_scan_finished,
2302     .this_id        = -1,
2303     .sg_tablesize       = CSIO_SCSI_MAX_SGE,
2304     .cmd_per_lun        = CSIO_MAX_CMD_PER_LUN,
2305     .shost_groups       = csio_fcoe_vport_groups,
2306     .max_sectors        = CSIO_MAX_SECTOR_SIZE,
2307 };
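
/*
 * The two templates above differ mainly in their sysfs attribute
 * groups: physical ports (lports) expose hw_state, device_reset,
 * disable_port and dbg_level, while NPIV vports expose num_reg_rnodes
 * and dbg_level (see the attribute arrays earlier in this file).
 */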
2308 
/*
 * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
 * @scm: SCSI module
 * @hw: HW device.
 * @buf_size: buffer size
 * @num_buf: number of buffers.
 *
 * This routine allocates the DMA buffers required for SCSI data transfer
 * when the SGL buffers of a SCSI read request posted by the SCSI midlayer
 * are not virtually contiguous.
 */
2320 static int
2321 csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
2322              int buf_size, int num_buf)
2323 {
2324     int n = 0;
2325     struct list_head *tmp;
2326     struct csio_dma_buf *ddp_desc = NULL;
2327     uint32_t unit_size = 0;
2328 
2329     if (!num_buf)
2330         return 0;
2331 
2332     if (!buf_size)
2333         return -EINVAL;
2334 
2335     INIT_LIST_HEAD(&scm->ddp_freelist);
2336 
2337     /* Align buf size to page size */
2338     buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
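    /*
     * Round-up example (with a hypothetical 4 KiB PAGE_SIZE): a
     * requested buf_size of 5000 becomes (5000 + 4095) & ~4095 = 8192,
     * i.e. two pages, while an already-aligned 4096 stays 4096.
     */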
2339     /* Initialize dma descriptors */
2340     for (n = 0; n < num_buf; n++) {
2341         /* Set unit size to request size */
2342         unit_size = buf_size;
2343         ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
2344         if (!ddp_desc) {
2345             csio_err(hw,
2346                  "Failed to allocate ddp descriptors,"
2347                  " Num allocated = %d.\n",
2348                  scm->stats.n_free_ddp);
2349             goto no_mem;
2350         }
2351 
2352         /* Allocate Dma buffers for DDP */
2353         ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
2354                 &ddp_desc->paddr, GFP_KERNEL);
2355         if (!ddp_desc->vaddr) {
2356             csio_err(hw,
2357                  "SCSI response DMA buffer (ddp) allocation"
2358                  " failed!\n");
2359             kfree(ddp_desc);
2360             goto no_mem;
2361         }
2362 
2363         ddp_desc->len = unit_size;
2364 
        /* Add it to the SCSI DDP freelist */
2366         list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
2367         CSIO_INC_STATS(scm, n_free_ddp);
2368     }
2369 
2370     return 0;
2371 no_mem:
2372     /* release dma descs back to freelist and free dma memory */
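    /*
     * Each iteration frees the current entry, so step tmp back to the
     * previous node first; list_for_each() then advances it safely
     * past the element being freed (same effect as
     * list_for_each_safe()).
     */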
2373     list_for_each(tmp, &scm->ddp_freelist) {
2374         ddp_desc = (struct csio_dma_buf *) tmp;
2375         tmp = csio_list_prev(tmp);
2376         dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2377                   ddp_desc->vaddr, ddp_desc->paddr);
2378         list_del_init(&ddp_desc->list);
2379         kfree(ddp_desc);
2380     }
2381     scm->stats.n_free_ddp = 0;
2382 
2383     return -ENOMEM;
2384 }
2385 
2386 /*
2387  * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
2388  * @scm: SCSI Module
2389  * @hw: HW device.
2390  *
2391  * This routine frees ddp buffers.
2392  */
2393 static void
2394 csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
2395 {
2396     struct list_head *tmp;
2397     struct csio_dma_buf *ddp_desc;
2398 
2399     /* release dma descs back to freelist and free dma memory */
2400     list_for_each(tmp, &scm->ddp_freelist) {
2401         ddp_desc = (struct csio_dma_buf *) tmp;
2402         tmp = csio_list_prev(tmp);
2403         dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2404                   ddp_desc->vaddr, ddp_desc->paddr);
2405         list_del_init(&ddp_desc->list);
2406         kfree(ddp_desc);
2407     }
2408     scm->stats.n_free_ddp = 0;
2409 }
2410 
2411 /**
2412  * csio_scsim_init - Initialize SCSI Module
2413  * @scm:    SCSI Module
2414  * @hw:     HW module
2415  *
2416  */
2417 int
2418 csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
2419 {
2420     int i;
2421     struct csio_ioreq *ioreq;
2422     struct csio_dma_buf *dma_buf;
2423 
2424     INIT_LIST_HEAD(&scm->active_q);
2425     scm->hw = hw;
2426 
2427     scm->proto_cmd_len = sizeof(struct fcp_cmnd);
2428     scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
2429     scm->max_sge = CSIO_SCSI_MAX_SGE;
2430 
2431     spin_lock_init(&scm->freelist_lock);
2432 
2433     /* Pre-allocate ioreqs and initialize them */
2434     INIT_LIST_HEAD(&scm->ioreq_freelist);
2435     for (i = 0; i < csio_scsi_ioreqs; i++) {
2436 
2437         ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
2438         if (!ioreq) {
2439             csio_err(hw,
                 "I/O request element allocation failed,"
                 " num allocated = %d.\n",
                 scm->stats.n_free_ioreq);
2443 
2444             goto free_ioreq;
2445         }
2446 
2447         /* Allocate Dma buffers for Response Payload */
2448         dma_buf = &ioreq->dma_buf;
2449         dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
2450                         &dma_buf->paddr);
2451         if (!dma_buf->vaddr) {
2452             csio_err(hw,
2453                  "SCSI response DMA buffer allocation"
2454                  " failed!\n");
2455             kfree(ioreq);
2456             goto free_ioreq;
2457         }
2458 
2459         dma_buf->len = scm->proto_rsp_len;
2460 
2461         /* Set state to uninit */
2462         csio_init_state(&ioreq->sm, csio_scsis_uninit);
2463         INIT_LIST_HEAD(&ioreq->gen_list);
2464         init_completion(&ioreq->cmplobj);
2465 
2466         list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
2467         CSIO_INC_STATS(scm, n_free_ioreq);
2468     }
2469 
2470     if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
2471         goto free_ioreq;
2472 
2473     return 0;
2474 
2475 free_ioreq:
2476     /*
2477      * Free up existing allocations, since an error
2478      * from here means we are returning for good
2479      */
2480     while (!list_empty(&scm->ioreq_freelist)) {
2481         struct csio_sm *tmp;
2482 
2483         tmp = list_first_entry(&scm->ioreq_freelist,
2484                        struct csio_sm, sm_list);
2485         list_del_init(&tmp->sm_list);
2486         ioreq = (struct csio_ioreq *)tmp;
2487 
2488         dma_buf = &ioreq->dma_buf;
2489         dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
2490                   dma_buf->paddr);
2491 
2492         kfree(ioreq);
2493     }
2494 
2495     scm->stats.n_free_ioreq = 0;
2496 
2497     return -ENOMEM;
2498 }
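
/*
 * Not driver code: a minimal sketch of what a get from the ioreq
 * freelist initialized above could look like under scm->freelist_lock.
 * The driver's real helpers, csio_get_scsi_ioreq_lock() and
 * csio_put_scsi_ioreq_lock(), live elsewhere in the module; the name
 * below is hypothetical and only mirrors the fields set up in
 * csio_scsim_init().
 */
static inline struct csio_ioreq *
example_get_scsi_ioreq(struct csio_scsim *scm)
{
    struct csio_sm *t = NULL;
    unsigned long flags;

    spin_lock_irqsave(&scm->freelist_lock, flags);
    if (!list_empty(&scm->ioreq_freelist)) {
        t = list_first_entry(&scm->ioreq_freelist,
                     struct csio_sm, sm_list);
        list_del_init(&t->sm_list);
    }
    spin_unlock_irqrestore(&scm->freelist_lock, flags);

    return (struct csio_ioreq *)t;
}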
2499 
2500 /**
 * csio_scsim_exit - Uninitialize SCSI module
 * @scm: SCSI module
2503  *
2504  */
2505 void
2506 csio_scsim_exit(struct csio_scsim *scm)
2507 {
2508     struct csio_ioreq *ioreq;
2509     struct csio_dma_buf *dma_buf;
2510 
2511     while (!list_empty(&scm->ioreq_freelist)) {
2512         struct csio_sm *tmp;
2513 
2514         tmp = list_first_entry(&scm->ioreq_freelist,
2515                        struct csio_sm, sm_list);
2516         list_del_init(&tmp->sm_list);
2517         ioreq = (struct csio_ioreq *)tmp;
2518 
2519         dma_buf = &ioreq->dma_buf;
2520         dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
2521                   dma_buf->paddr);
2522 
2523         kfree(ioreq);
2524     }
2525 
2526     scm->stats.n_free_ioreq = 0;
2527 
2528     csio_scsi_free_ddp_bufs(scm, scm->hw);
2529 }