0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * QLogic Fibre Channel HBA Driver
0004  * Copyright (c) 2003-2014 QLogic Corporation
0005  */
0006 #include "qla_def.h"
0007 #include "qla_target.h"
0008 
0009 #include <linux/blkdev.h>
0010 #include <linux/delay.h>
0011 
0012 #include <scsi/scsi_tcq.h>
0013 
0014 /**
0015  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
0016  * @sp: SCSI command
0017  *
0018  * Returns the proper CF_* direction based on the command's data direction.
0019  */
0020 static inline uint16_t
0021 qla2x00_get_cmd_direction(srb_t *sp)
0022 {
0023     uint16_t cflags;
0024     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
0025     struct scsi_qla_host *vha = sp->vha;
0026 
0027     cflags = 0;
0028 
0029     /* Set transfer direction */
0030     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
0031         cflags = CF_WRITE;
0032         vha->qla_stats.output_bytes += scsi_bufflen(cmd);
0033         vha->qla_stats.output_requests++;
0034     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
0035         cflags = CF_READ;
0036         vha->qla_stats.input_bytes += scsi_bufflen(cmd);
0037         vha->qla_stats.input_requests++;
0038     }
0039     return (cflags);
0040 }
0041 
0042 /**
0043  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
0044  * Continuation Type 0 IOCBs to allocate.
0045  *
0046  * @dsds: number of data segment descriptors needed
0047  *
0048  * Returns the number of IOCB entries needed to store @dsds.
0049  */
0050 uint16_t
0051 qla2x00_calc_iocbs_32(uint16_t dsds)
0052 {
0053     uint16_t iocbs;
0054 
0055     iocbs = 1;
0056     if (dsds > 3) {
0057         iocbs += (dsds - 3) / 7;
0058         if ((dsds - 3) % 7)
0059             iocbs++;
0060     }
0061     return (iocbs);
0062 }
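/*
 * Worked example: the Command Type 2 IOCB carries 3 DSDs and each
 * Continuation Type 0 IOCB carries 7, so dsds = 10 yields
 * 1 + (7 / 7) = 2 entries, while dsds = 11 leaves a remainder and
 * yields 3.
 */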
0063 
0064 /**
0065  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
0066  * Continuation Type 1 IOCBs to allocate.
0067  *
0068  * @dsds: number of data segment descriptors needed
0069  *
0070  * Returns the number of IOCB entries needed to store @dsds.
0071  */
0072 uint16_t
0073 qla2x00_calc_iocbs_64(uint16_t dsds)
0074 {
0075     uint16_t iocbs;
0076 
0077     iocbs = 1;
0078     if (dsds > 2) {
0079         iocbs += (dsds - 2) / 5;
0080         if ((dsds - 2) % 5)
0081             iocbs++;
0082     }
0083     return (iocbs);
0084 }
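/*
 * Worked example for the 64-bit layout: 2 DSDs fit in the Command
 * Type 3 IOCB and 5 in each Continuation Type 1 IOCB, so dsds = 12
 * yields 1 + (10 / 5) = 3 entries and dsds = 13 yields 4.
 */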
0085 
0086 /**
0087  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
0088  * @vha: HA context
0089  *
0090  * Returns a pointer to the Continuation Type 0 IOCB packet.
0091  */
0092 static inline cont_entry_t *
0093 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
0094 {
0095     cont_entry_t *cont_pkt;
0096     struct req_que *req = vha->req;
0097     /* Adjust ring index. */
0098     req->ring_index++;
0099     if (req->ring_index == req->length) {
0100         req->ring_index = 0;
0101         req->ring_ptr = req->ring;
0102     } else {
0103         req->ring_ptr++;
0104     }
0105 
0106     cont_pkt = (cont_entry_t *)req->ring_ptr;
0107 
0108     /* Load packet defaults. */
0109     put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
0110 
0111     return (cont_pkt);
0112 }
0113 
0114 /**
0115  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
0116  * @vha: HA context
0117  * @req: request queue
0118  *
0119  * Returns a pointer to the Continuation Type 1 IOCB packet.
0120  */
0121 cont_a64_entry_t *
0122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
0123 {
0124     cont_a64_entry_t *cont_pkt;
0125 
0126     /* Adjust ring index. */
0127     req->ring_index++;
0128     if (req->ring_index == req->length) {
0129         req->ring_index = 0;
0130         req->ring_ptr = req->ring;
0131     } else {
0132         req->ring_ptr++;
0133     }
0134 
0135     cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
0136 
0137     /* Load packet defaults. */
0138     put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
0139                CONTINUE_A64_TYPE, &cont_pkt->entry_type);
0140 
0141     return (cont_pkt);
0142 }
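/*
 * Example of the ring-index wrap used by both helpers above: with
 * req->length = 128 and req->ring_index = 127, the increment makes
 * ring_index equal to the ring length, so it resets to 0 and ring_ptr
 * rewinds to the base of the ring; otherwise ring_ptr just advances to
 * the next entry.
 */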
0143 
0144 inline int
0145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
0146 {
0147     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
0148 
0149     /* We always use DIF bundling for best performance */
0150     *fw_prot_opts = 0;
0151 
0152     /* Translate SCSI opcode to a protection opcode */
0153     switch (scsi_get_prot_op(cmd)) {
0154     case SCSI_PROT_READ_STRIP:
0155         *fw_prot_opts |= PO_MODE_DIF_REMOVE;
0156         break;
0157     case SCSI_PROT_WRITE_INSERT:
0158         *fw_prot_opts |= PO_MODE_DIF_INSERT;
0159         break;
0160     case SCSI_PROT_READ_INSERT:
0161         *fw_prot_opts |= PO_MODE_DIF_INSERT;
0162         break;
0163     case SCSI_PROT_WRITE_STRIP:
0164         *fw_prot_opts |= PO_MODE_DIF_REMOVE;
0165         break;
0166     case SCSI_PROT_READ_PASS:
0167     case SCSI_PROT_WRITE_PASS:
0168         if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
0169             *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
0170         else
0171             *fw_prot_opts |= PO_MODE_DIF_PASS;
0172         break;
0173     default:    /* Normal Request */
0174         *fw_prot_opts |= PO_MODE_DIF_PASS;
0175         break;
0176     }
0177 
0178     if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
0179         *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
0180 
0181     return scsi_prot_sg_count(cmd);
0182 }
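/*
 * Example: for a SCSI_PROT_WRITE_PASS command with SCSI_PROT_IP_CHECKSUM
 * set and SCSI_PROT_GUARD_CHECK clear, *fw_prot_opts ends up as
 * PO_MODE_DIF_TCP_CKSUM | PO_DISABLE_GUARD_CHECK and the return value is
 * the number of protection SG entries for the command.
 */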
0183 
0184 /**
0185  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
0186  * capable IOCB types.
0187  *
0188  * @sp: SRB command to process
0189  * @cmd_pkt: Command type 2 IOCB
0190  * @tot_dsds: Total number of segments to transfer
0191  */
0192 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
0193     uint16_t tot_dsds)
0194 {
0195     uint16_t    avail_dsds;
0196     struct dsd32    *cur_dsd;
0197     scsi_qla_host_t *vha;
0198     struct scsi_cmnd *cmd;
0199     struct scatterlist *sg;
0200     int i;
0201 
0202     cmd = GET_CMD_SP(sp);
0203 
0204     /* Update entry type to indicate Command Type 2 IOCB */
0205     put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
0206 
0207     /* No data transfer */
0208     if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
0209         cmd_pkt->byte_count = cpu_to_le32(0);
0210         return;
0211     }
0212 
0213     vha = sp->vha;
0214     cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
0215 
0216     /* Three DSDs are available in the Command Type 2 IOCB */
0217     avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
0218     cur_dsd = cmd_pkt->dsd32;
0219 
0220     /* Load data segments */
0221     scsi_for_each_sg(cmd, sg, tot_dsds, i) {
0222         cont_entry_t *cont_pkt;
0223 
0224         /* Allocate additional continuation packets? */
0225         if (avail_dsds == 0) {
0226             /*
0227              * Seven DSDs are available in the Continuation
0228              * Type 0 IOCB.
0229              */
0230             cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
0231             cur_dsd = cont_pkt->dsd;
0232             avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
0233         }
0234 
0235         append_dsd32(&cur_dsd, sg);
0236         avail_dsds--;
0237     }
0238 }
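/*
 * For example, a command with tot_dsds = 12 places 3 DSDs in the Command
 * Type 2 IOCB itself and then chains two Continuation Type 0 IOCBs
 * holding 7 and 2 DSDs; the 64-bit variant below behaves the same way
 * with a 2 + 5 + 5 split.
 */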
0239 
0240 /**
0241  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
0242  * capable IOCB types.
0243  *
0244  * @sp: SRB command to process
0245  * @cmd_pkt: Command type 3 IOCB
0246  * @tot_dsds: Total number of segments to transfer
0247  */
0248 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
0249     uint16_t tot_dsds)
0250 {
0251     uint16_t    avail_dsds;
0252     struct dsd64    *cur_dsd;
0253     scsi_qla_host_t *vha;
0254     struct scsi_cmnd *cmd;
0255     struct scatterlist *sg;
0256     int i;
0257 
0258     cmd = GET_CMD_SP(sp);
0259 
0260     /* Update entry type to indicate Command Type 3 IOCB */
0261     put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
0262 
0263     /* No data transfer */
0264     if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
0265         cmd_pkt->byte_count = cpu_to_le32(0);
0266         return;
0267     }
0268 
0269     vha = sp->vha;
0270     cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
0271 
0272     /* Two DSDs are available in the Command Type 3 IOCB */
0273     avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
0274     cur_dsd = cmd_pkt->dsd64;
0275 
0276     /* Load data segments */
0277     scsi_for_each_sg(cmd, sg, tot_dsds, i) {
0278         cont_a64_entry_t *cont_pkt;
0279 
0280         /* Allocate additional continuation packets? */
0281         if (avail_dsds == 0) {
0282             /*
0283              * Five DSDs are available in the Continuation
0284              * Type 1 IOCB.
0285              */
0286             cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
0287             cur_dsd = cont_pkt->dsd;
0288             avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
0289         }
0290 
0291         append_dsd64(&cur_dsd, sg);
0292         avail_dsds--;
0293     }
0294 }
0295 
0296 /*
0297  * Find the first handle that is not in use, starting from
0298  * req->current_outstanding_cmd + 1. The caller must hold the lock that is
0299  * associated with @req.
0300  */
0301 uint32_t qla2xxx_get_next_handle(struct req_que *req)
0302 {
0303     uint32_t index, handle = req->current_outstanding_cmd;
0304 
0305     for (index = 1; index < req->num_outstanding_cmds; index++) {
0306         handle++;
0307         if (handle == req->num_outstanding_cmds)
0308             handle = 1;
0309         if (!req->outstanding_cmds[handle])
0310             return handle;
0311     }
0312 
0313     return 0;
0314 }
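/*
 * Typical use under the ring lock, as in qla2x00_start_scsi() below:
 *
 *    handle = qla2xxx_get_next_handle(req);
 *    if (handle == 0)
 *        goto queuing_error;
 *
 * Example: with req->num_outstanding_cmds = 2048 and
 * req->current_outstanding_cmd = 2047, the first candidate wraps from
 * 2048 back to 1; handle 0 is reserved, so a return of 0 always means
 * the table is full.
 */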
0315 
0316 /**
0317  * qla2x00_start_scsi() - Send a SCSI command to the ISP
0318  * @sp: command to send to the ISP
0319  *
0320  * Returns non-zero if a failure occurred, else zero.
0321  */
0322 int
0323 qla2x00_start_scsi(srb_t *sp)
0324 {
0325     int     nseg;
0326     unsigned long   flags;
0327     scsi_qla_host_t *vha;
0328     struct scsi_cmnd *cmd;
0329     uint32_t    *clr_ptr;
0330     uint32_t    handle;
0331     cmd_entry_t *cmd_pkt;
0332     uint16_t    cnt;
0333     uint16_t    req_cnt;
0334     uint16_t    tot_dsds;
0335     struct device_reg_2xxx __iomem *reg;
0336     struct qla_hw_data *ha;
0337     struct req_que *req;
0338     struct rsp_que *rsp;
0339 
0340     /* Setup device pointers. */
0341     vha = sp->vha;
0342     ha = vha->hw;
0343     reg = &ha->iobase->isp;
0344     cmd = GET_CMD_SP(sp);
0345     req = ha->req_q_map[0];
0346     rsp = ha->rsp_q_map[0];
0347     /* So we know we haven't pci_map'ed anything yet */
0348     tot_dsds = 0;
0349 
0350     /* Send marker if required */
0351     if (vha->marker_needed != 0) {
0352         if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
0353             QLA_SUCCESS) {
0354             return (QLA_FUNCTION_FAILED);
0355         }
0356         vha->marker_needed = 0;
0357     }
0358 
0359     /* Acquire ring specific lock */
0360     spin_lock_irqsave(&ha->hardware_lock, flags);
0361 
0362     handle = qla2xxx_get_next_handle(req);
0363     if (handle == 0)
0364         goto queuing_error;
0365 
0366     /* Map the sg table so we have an accurate count of sg entries needed */
0367     if (scsi_sg_count(cmd)) {
0368         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
0369             scsi_sg_count(cmd), cmd->sc_data_direction);
0370         if (unlikely(!nseg))
0371             goto queuing_error;
0372     } else
0373         nseg = 0;
0374 
0375     tot_dsds = nseg;
0376 
0377     /* Calculate the number of request entries needed. */
0378     req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
0379     if (req->cnt < (req_cnt + 2)) {
0380         cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
0381         if (req->ring_index < cnt)
0382             req->cnt = cnt - req->ring_index;
0383         else
0384             req->cnt = req->length -
0385                 (req->ring_index - cnt);
0386         /* If still no head room then bail out */
0387         if (req->cnt < (req_cnt + 2))
0388             goto queuing_error;
0389     }
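    /*
     * Example: with req->length = 128, ring_index = 120 and a hardware
     * out pointer of 10, the producer is 110 entries ahead, leaving
     * req->cnt = 128 - 110 = 18 free slots; the + 2 in the check above
     * keeps head room so the in and out pointers never fully collide.
     */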
0390 
0391     /* Build command packet */
0392     req->current_outstanding_cmd = handle;
0393     req->outstanding_cmds[handle] = sp;
0394     sp->handle = handle;
0395     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
0396     req->cnt -= req_cnt;
0397 
0398     cmd_pkt = (cmd_entry_t *)req->ring_ptr;
0399     cmd_pkt->handle = handle;
0400     /* Zero out remaining portion of packet. */
0401     clr_ptr = (uint32_t *)cmd_pkt + 2;
0402     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
0403     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
0404 
0405     /* Set target ID and LUN number */
0406     SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
0407     cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
0408     cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
0409 
0410     /* Load SCSI command packet. */
0411     memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
0412     cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
0413 
0414     /* Build IOCB segments */
0415     ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
0416 
0417     /* Set total data segment count. */
0418     cmd_pkt->entry_count = (uint8_t)req_cnt;
0419     wmb();
0420 
0421     /* Adjust ring index. */
0422     req->ring_index++;
0423     if (req->ring_index == req->length) {
0424         req->ring_index = 0;
0425         req->ring_ptr = req->ring;
0426     } else
0427         req->ring_ptr++;
0428 
0429     sp->flags |= SRB_DMA_VALID;
0430 
0431     /* Set chip new ring index. */
0432     wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
0433     rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
0434 
0435     /* Manage unprocessed RIO/ZIO commands in response queue. */
0436     if (vha->flags.process_response_queue &&
0437         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
0438         qla2x00_process_response_queue(rsp);
0439 
0440     spin_unlock_irqrestore(&ha->hardware_lock, flags);
0441     return (QLA_SUCCESS);
0442 
0443 queuing_error:
0444     if (tot_dsds)
0445         scsi_dma_unmap(cmd);
0446 
0447     spin_unlock_irqrestore(&ha->hardware_lock, flags);
0448 
0449     return (QLA_FUNCTION_FAILED);
0450 }
0451 
0452 /**
0453  * qla2x00_start_iocbs() - Execute the IOCB command
0454  * @vha: HA context
0455  * @req: request queue
0456  */
0457 void
0458 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
0459 {
0460     struct qla_hw_data *ha = vha->hw;
0461     device_reg_t *reg = ISP_QUE_REG(ha, req->id);
0462 
0463     if (IS_P3P_TYPE(ha)) {
0464         qla82xx_start_iocbs(vha);
0465     } else {
0466         /* Adjust ring index. */
0467         req->ring_index++;
0468         if (req->ring_index == req->length) {
0469             req->ring_index = 0;
0470             req->ring_ptr = req->ring;
0471         } else
0472             req->ring_ptr++;
0473 
0474         /* Set chip new ring index. */
0475         if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
0476             wrt_reg_dword(req->req_q_in, req->ring_index);
0477         } else if (IS_QLA83XX(ha)) {
0478             wrt_reg_dword(req->req_q_in, req->ring_index);
0479             rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
0480         } else if (IS_QLAFX00(ha)) {
0481             wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
0482             rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
0483             QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
0484         } else if (IS_FWI2_CAPABLE(ha)) {
0485             wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
0486             rd_reg_dword_relaxed(&reg->isp24.req_q_in);
0487         } else {
0488             wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
0489                 req->ring_index);
0490             rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
0491         }
0492     }
0493 }
0494 
0495 /**
0496  * __qla2x00_marker() - Send a marker IOCB to the firmware.
0497  * @vha: HA context
0498  * @qpair: queue pair pointer
0499  * @loop_id: loop ID
0500  * @lun: LUN
0501  * @type: marker modifier
0502  *
0503  * Can be called from both normal and interrupt context.
0504  *
0505  * Returns non-zero if a failure occurred, else zero.
0506  */
0507 static int
0508 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
0509     uint16_t loop_id, uint64_t lun, uint8_t type)
0510 {
0511     mrk_entry_t *mrk;
0512     struct mrk_entry_24xx *mrk24 = NULL;
0513     struct req_que *req = qpair->req;
0514     struct qla_hw_data *ha = vha->hw;
0515     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
0516 
0517     mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
0518     if (mrk == NULL) {
0519         ql_log(ql_log_warn, base_vha, 0x3026,
0520             "Failed to allocate Marker IOCB.\n");
0521 
0522         return (QLA_FUNCTION_FAILED);
0523     }
0524 
0525     mrk->entry_type = MARKER_TYPE;
0526     mrk->modifier = type;
0527     if (type != MK_SYNC_ALL) {
0528         if (IS_FWI2_CAPABLE(ha)) {
0529             mrk24 = (struct mrk_entry_24xx *) mrk;
0530             mrk24->nport_handle = cpu_to_le16(loop_id);
0531             int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
0532             host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
0533             mrk24->vp_index = vha->vp_idx;
0534             mrk24->handle = make_handle(req->id, mrk24->handle);
0535         } else {
0536             SET_TARGET_ID(ha, mrk->target, loop_id);
0537             mrk->lun = cpu_to_le16((uint16_t)lun);
0538         }
0539     }
0540     wmb();
0541 
0542     qla2x00_start_iocbs(vha, req);
0543 
0544     return (QLA_SUCCESS);
0545 }
0546 
0547 int
0548 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
0549     uint16_t loop_id, uint64_t lun, uint8_t type)
0550 {
0551     int ret;
0552     unsigned long flags = 0;
0553 
0554     spin_lock_irqsave(qpair->qp_lock_ptr, flags);
0555     ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
0556     spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
0557 
0558     return (ret);
0559 }
0560 
0561 /*
0562  * qla2x00_issue_marker
0563  *
0564  * Issue a marker IOCB.
0565  * The caller may hold the hardware lock, as indicated by @ha_locked.
0566  * The lock might be released and then reacquired.
0567  */
0568 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
0569 {
0570     if (ha_locked) {
0571         if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
0572                     MK_SYNC_ALL) != QLA_SUCCESS)
0573             return QLA_FUNCTION_FAILED;
0574     } else {
0575         if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
0576                     MK_SYNC_ALL) != QLA_SUCCESS)
0577             return QLA_FUNCTION_FAILED;
0578     }
0579     vha->marker_needed = 0;
0580 
0581     return QLA_SUCCESS;
0582 }
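/*
 * A minimal usage sketch (hypothetical call site): from process context
 * with the hardware lock not held,
 *
 *    if (qla2x00_issue_marker(vha, 0) != QLA_SUCCESS)
 *        return QLA_FUNCTION_FAILED;
 *
 * A caller already holding the hardware lock passes ha_locked = 1 so the
 * lock-free __qla2x00_marker() variant is used and the lock is not taken
 * twice.
 */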
0583 
0584 static inline int
0585 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
0586     uint16_t tot_dsds)
0587 {
0588     struct dsd64 *cur_dsd = NULL, *next_dsd;
0589     scsi_qla_host_t *vha;
0590     struct qla_hw_data *ha;
0591     struct scsi_cmnd *cmd;
0592     struct  scatterlist *cur_seg;
0593     uint8_t avail_dsds;
0594     uint8_t first_iocb = 1;
0595     uint32_t dsd_list_len;
0596     struct dsd_dma *dsd_ptr;
0597     struct ct6_dsd *ctx;
0598     struct qla_qpair *qpair = sp->qpair;
0599 
0600     cmd = GET_CMD_SP(sp);
0601 
0602     /* Update entry type to indicate Command Type 6 IOCB */
0603     put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
0604 
0605     /* No data transfer */
0606     if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
0607         cmd_pkt->byte_count = cpu_to_le32(0);
0608         return 0;
0609     }
0610 
0611     vha = sp->vha;
0612     ha = vha->hw;
0613 
0614     /* Set transfer direction */
0615     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
0616         cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
0617         qpair->counters.output_bytes += scsi_bufflen(cmd);
0618         qpair->counters.output_requests++;
0619     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
0620         cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
0621         qpair->counters.input_bytes += scsi_bufflen(cmd);
0622         qpair->counters.input_requests++;
0623     }
0624 
0625     cur_seg = scsi_sglist(cmd);
0626     ctx = sp->u.scmd.ct6_ctx;
0627 
0628     while (tot_dsds) {
0629         avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
0630             QLA_DSDS_PER_IOCB : tot_dsds;
0631         tot_dsds -= avail_dsds;
0632         dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
0633 
0634         dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
0635             struct dsd_dma, list);
0636         next_dsd = dsd_ptr->dsd_addr;
0637         list_del(&dsd_ptr->list);
0638         ha->gbl_dsd_avail--;
0639         list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
0640         ctx->dsd_use_cnt++;
0641         ha->gbl_dsd_inuse++;
0642 
0643         if (first_iocb) {
0644             first_iocb = 0;
0645             put_unaligned_le64(dsd_ptr->dsd_list_dma,
0646                        &cmd_pkt->fcp_dsd.address);
0647             cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
0648         } else {
0649             put_unaligned_le64(dsd_ptr->dsd_list_dma,
0650                        &cur_dsd->address);
0651             cur_dsd->length = cpu_to_le32(dsd_list_len);
0652             cur_dsd++;
0653         }
0654         cur_dsd = next_dsd;
0655         while (avail_dsds) {
0656             append_dsd64(&cur_dsd, cur_seg);
0657             cur_seg = sg_next(cur_seg);
0658             avail_dsds--;
0659         }
0660     }
0661 
0662     /* Null termination */
0663     cur_dsd->address = 0;
0664     cur_dsd->length = 0;
0665     cur_dsd++;
0666     cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
0667     return 0;
0668 }
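/*
 * Sketch of the chaining, assuming the qla_def.h values QLA_DSDS_PER_IOCB
 * = 37 and QLA_DSD_SIZE = 12: tot_dsds = 80 consumes three preallocated
 * lists of 37, 37 and 6 DSDs. The first list is hooked into
 * cmd_pkt->fcp_dsd and each later list is linked through the extra
 * (avail_dsds + 1) slot accounted for in dsd_list_len.
 */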
0669 
0670 /*
0671  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
0672  * for Command Type 6.
0673  *
0674  * @dsds: number of data segment descriptors needed
0675  *
0676  * Returns the number of DSD lists needed to store @dsds.
0677  */
0678 static inline uint16_t
0679 qla24xx_calc_dsd_lists(uint16_t dsds)
0680 {
0681     uint16_t dsd_lists = 0;
0682 
0683     dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
0684     if (dsds % QLA_DSDS_PER_IOCB)
0685         dsd_lists++;
0686     return dsd_lists;
0687 }
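/*
 * Assuming QLA_DSDS_PER_IOCB is 37, dsds = 74 needs exactly two lists
 * while dsds = 75 rounds up to three.
 */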
0688 
0689 
0690 /**
0691  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
0692  * IOCB types.
0693  *
0694  * @sp: SRB command to process
0695  * @cmd_pkt: Command Type 7 IOCB
0696  * @tot_dsds: Total number of segments to transfer
0697  * @req: pointer to request queue
0698  */
0699 inline void
0700 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
0701     uint16_t tot_dsds, struct req_que *req)
0702 {
0703     uint16_t    avail_dsds;
0704     struct dsd64    *cur_dsd;
0705     scsi_qla_host_t *vha;
0706     struct scsi_cmnd *cmd;
0707     struct scatterlist *sg;
0708     int i;
0709     struct qla_qpair *qpair = sp->qpair;
0710 
0711     cmd = GET_CMD_SP(sp);
0712 
0713     /* Update entry type to indicate Command Type 7 IOCB */
0714     put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
0715 
0716     /* No data transfer */
0717     if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
0718         cmd_pkt->byte_count = cpu_to_le32(0);
0719         return;
0720     }
0721 
0722     vha = sp->vha;
0723 
0724     /* Set transfer direction */
0725     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
0726         cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
0727         qpair->counters.output_bytes += scsi_bufflen(cmd);
0728         qpair->counters.output_requests++;
0729     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
0730         cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
0731         qpair->counters.input_bytes += scsi_bufflen(cmd);
0732         qpair->counters.input_requests++;
0733     }
0734 
0735     /* One DSD is available in the Command Type 7 IOCB */
0736     avail_dsds = 1;
0737     cur_dsd = &cmd_pkt->dsd;
0738 
0739     /* Load data segments */
0740 
0741     scsi_for_each_sg(cmd, sg, tot_dsds, i) {
0742         cont_a64_entry_t *cont_pkt;
0743 
0744         /* Allocate additional continuation packets? */
0745         if (avail_dsds == 0) {
0746             /*
0747              * Five DSDs are available in the Continuation
0748              * Type 1 IOCB.
0749              */
0750             cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
0751             cur_dsd = cont_pkt->dsd;
0752             avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
0753         }
0754 
0755         append_dsd64(&cur_dsd, sg);
0756         avail_dsds--;
0757     }
0758 }
0759 
0760 struct fw_dif_context {
0761     __le32  ref_tag;
0762     __le16  app_tag;
0763     uint8_t ref_tag_mask[4];    /* Validation/Replacement Mask*/
0764     uint8_t app_tag_mask[2];    /* Validation/Replacement Mask*/
0765 };
0766 
0767 /*
0768  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
0769  */
0771 static inline void
0772 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
0773     unsigned int protcnt)
0774 {
0775     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
0776 
0777     pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
0778 
0779     if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
0780         qla2x00_hba_err_chk_enabled(sp)) {
0781         pkt->ref_tag_mask[0] = 0xff;
0782         pkt->ref_tag_mask[1] = 0xff;
0783         pkt->ref_tag_mask[2] = 0xff;
0784         pkt->ref_tag_mask[3] = 0xff;
0785     }
0786 
0787     pkt->app_tag = cpu_to_le16(0);
0788     pkt->app_tag_mask[0] = 0x0;
0789     pkt->app_tag_mask[1] = 0x0;
0790 }
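/*
 * Example: for a Type 1 protected READ with SCSI_PROT_REF_CHECK set and
 * HBA error checking enabled, ref_tag holds the LBA-derived reference tag
 * and all four mask bytes are 0xff, so the full 32-bit ref tag is
 * validated; the app tag is always written as 0 with a zero mask, i.e. it
 * is never checked.
 */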
0791 
0792 int
0793 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
0794     uint32_t *partial)
0795 {
0796     struct scatterlist *sg;
0797     uint32_t cumulative_partial, sg_len;
0798     dma_addr_t sg_dma_addr;
0799 
0800     if (sgx->num_bytes == sgx->tot_bytes)
0801         return 0;
0802 
0803     sg = sgx->cur_sg;
0804     cumulative_partial = sgx->tot_partial;
0805 
0806     sg_dma_addr = sg_dma_address(sg);
0807     sg_len = sg_dma_len(sg);
0808 
0809     sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
0810 
0811     if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
0812         sgx->dma_len = (blk_sz - cumulative_partial);
0813         sgx->tot_partial = 0;
0814         sgx->num_bytes += blk_sz;
0815         *partial = 0;
0816     } else {
0817         sgx->dma_len = sg_len - sgx->bytes_consumed;
0818         sgx->tot_partial += sgx->dma_len;
0819         *partial = 1;
0820     }
0821 
0822     sgx->bytes_consumed += sgx->dma_len;
0823 
0824     if (sg_len == sgx->bytes_consumed) {
0825         sg = sg_next(sg);
0826         sgx->num_sg++;
0827         sgx->cur_sg = sg;
0828         sgx->bytes_consumed = 0;
0829     }
0830 
0831     return 1;
0832 }
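/*
 * Example walk with blk_sz = 512: a first SG element of 300 bytes comes
 * back with *partial = 1 and tot_partial = 300; on a second element of
 * 724 bytes the next call returns the 212 bytes that complete the block
 * (*partial = 0), and the call after that maps a full 512-byte interval
 * starting at bytes_consumed = 212.
 */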
0833 
0834 int
0835 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
0836     struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
0837 {
0838     void *next_dsd;
0839     uint8_t avail_dsds = 0;
0840     uint32_t dsd_list_len;
0841     struct dsd_dma *dsd_ptr;
0842     struct scatterlist *sg_prot;
0843     struct dsd64 *cur_dsd = dsd;
0844     uint16_t    used_dsds = tot_dsds;
0845     uint32_t    prot_int; /* protection interval */
0846     uint32_t    partial;
0847     struct qla2_sgx sgx;
0848     dma_addr_t  sle_dma;
0849     uint32_t    sle_dma_len, tot_prot_dma_len = 0;
0850     struct scsi_cmnd *cmd;
0851 
0852     memset(&sgx, 0, sizeof(struct qla2_sgx));
0853     if (sp) {
0854         cmd = GET_CMD_SP(sp);
0855         prot_int = scsi_prot_interval(cmd);
0856 
0857         sgx.tot_bytes = scsi_bufflen(cmd);
0858         sgx.cur_sg = scsi_sglist(cmd);
0859         sgx.sp = sp;
0860 
0861         sg_prot = scsi_prot_sglist(cmd);
0862     } else if (tc) {
0863         prot_int      = tc->blk_sz;
0864         sgx.tot_bytes = tc->bufflen;
0865         sgx.cur_sg    = tc->sg;
0866         sg_prot       = tc->prot_sg;
0867     } else {
0868         BUG();
0869         return 1;
0870     }
0871 
0872     while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
0873 
0874         sle_dma = sgx.dma_addr;
0875         sle_dma_len = sgx.dma_len;
0876 alloc_and_fill:
0877         /* Allocate additional continuation packets? */
0878         if (avail_dsds == 0) {
0879             avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
0880                     QLA_DSDS_PER_IOCB : used_dsds;
0881             dsd_list_len = (avail_dsds + 1) * 12;
0882             used_dsds -= avail_dsds;
0883 
0884             /* allocate tracking DS */
0885             dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
0886             if (!dsd_ptr)
0887                 return 1;
0888 
0889             /* allocate new list */
0890             dsd_ptr->dsd_addr = next_dsd =
0891                 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
0892                 &dsd_ptr->dsd_list_dma);
0893 
0894             if (!next_dsd) {
0895                 /*
0896                  * Need to cleanup only this dsd_ptr, rest
0897                  * will be done by sp_free_dma()
0898                  */
0899                 kfree(dsd_ptr);
0900                 return 1;
0901             }
0902 
0903             if (sp) {
0904                 list_add_tail(&dsd_ptr->list,
0905                           &sp->u.scmd.crc_ctx->dsd_list);
0906 
0907                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
0908             } else {
0909                 list_add_tail(&dsd_ptr->list,
0910                     &(tc->ctx->dsd_list));
0911                 *tc->ctx_dsd_alloced = 1;
0912             }
0913 
0914 
0915             /* add new list to cmd iocb or last list */
0916             put_unaligned_le64(dsd_ptr->dsd_list_dma,
0917                        &cur_dsd->address);
0918             cur_dsd->length = cpu_to_le32(dsd_list_len);
0919             cur_dsd = next_dsd;
0920         }
0921         put_unaligned_le64(sle_dma, &cur_dsd->address);
0922         cur_dsd->length = cpu_to_le32(sle_dma_len);
0923         cur_dsd++;
0924         avail_dsds--;
0925 
0926         if (partial == 0) {
0927             /* Got a full protection interval */
0928             sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
0929             sle_dma_len = 8;
0930 
0931             tot_prot_dma_len += sle_dma_len;
0932             if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
0933                 tot_prot_dma_len = 0;
0934                 sg_prot = sg_next(sg_prot);
0935             }
0936 
0937             partial = 1; /* So as to not re-enter this block */
0938             goto alloc_and_fill;
0939         }
0940     }
0941     /* Null termination */
0942     cur_dsd->address = 0;
0943     cur_dsd->length = 0;
0944     cur_dsd++;
0945     return 0;
0946 }
0947 
0948 int
0949 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
0950     struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
0951 {
0952     void *next_dsd;
0953     uint8_t avail_dsds = 0;
0954     uint32_t dsd_list_len;
0955     struct dsd_dma *dsd_ptr;
0956     struct scatterlist *sg, *sgl;
0957     struct dsd64 *cur_dsd = dsd;
0958     int i;
0959     uint16_t    used_dsds = tot_dsds;
0960     struct scsi_cmnd *cmd;
0961 
0962     if (sp) {
0963         cmd = GET_CMD_SP(sp);
0964         sgl = scsi_sglist(cmd);
0965     } else if (tc) {
0966         sgl = tc->sg;
0967     } else {
0968         BUG();
0969         return 1;
0970     }
0971 
0972 
0973     for_each_sg(sgl, sg, tot_dsds, i) {
0974         /* Allocate additional continuation packets? */
0975         if (avail_dsds == 0) {
0976             avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
0977                     QLA_DSDS_PER_IOCB : used_dsds;
0978             dsd_list_len = (avail_dsds + 1) * 12;
0979             used_dsds -= avail_dsds;
0980 
0981             /* allocate tracking DS */
0982             dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
0983             if (!dsd_ptr)
0984                 return 1;
0985 
0986             /* allocate new list */
0987             dsd_ptr->dsd_addr = next_dsd =
0988                 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
0989                 &dsd_ptr->dsd_list_dma);
0990 
0991             if (!next_dsd) {
0992                 /*
0993                  * Need to cleanup only this dsd_ptr, rest
0994                  * will be done by sp_free_dma()
0995                  */
0996                 kfree(dsd_ptr);
0997                 return 1;
0998             }
0999 
1000             if (sp) {
1001                 list_add_tail(&dsd_ptr->list,
1002                           &sp->u.scmd.crc_ctx->dsd_list);
1003 
1004                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1005             } else {
1006                 list_add_tail(&dsd_ptr->list,
1007                     &(tc->ctx->dsd_list));
1008                 *tc->ctx_dsd_alloced = 1;
1009             }
1010 
1011             /* add new list to cmd iocb or last list */
1012             put_unaligned_le64(dsd_ptr->dsd_list_dma,
1013                        &cur_dsd->address);
1014             cur_dsd->length = cpu_to_le32(dsd_list_len);
1015             cur_dsd = next_dsd;
1016         }
1017         append_dsd64(&cur_dsd, sg);
1018         avail_dsds--;
1019 
1020     }
1021     /* Null termination */
1022     cur_dsd->address = 0;
1023     cur_dsd->length = 0;
1024     cur_dsd++;
1025     return 0;
1026 }
1027 
1028 int
1029 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1030     struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1031 {
1032     struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1033     struct scatterlist *sg, *sgl;
1034     struct crc_context *difctx = NULL;
1035     struct scsi_qla_host *vha;
1036     uint dsd_list_len;
1037     uint avail_dsds = 0;
1038     uint used_dsds = tot_dsds;
1039     bool dif_local_dma_alloc = false;
1040     bool direction_to_device = false;
1041     int i;
1042 
1043     if (sp) {
1044         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1045 
1046         sgl = scsi_prot_sglist(cmd);
1047         vha = sp->vha;
1048         difctx = sp->u.scmd.crc_ctx;
1049         direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1050         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1051           "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1052             __func__, cmd, difctx, sp);
1053     } else if (tc) {
1054         vha = tc->vha;
1055         sgl = tc->prot_sg;
1056         difctx = tc->ctx;
1057         direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1058     } else {
1059         BUG();
1060         return 1;
1061     }
1062 
1063     ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1064         "%s: enter (write=%u)\n", __func__, direction_to_device);
1065 
1066     /* if initiator doing write or target doing read */
1067     if (direction_to_device) {
1068         for_each_sg(sgl, sg, tot_dsds, i) {
1069             u64 sle_phys = sg_phys(sg);
1070 
1071             /* If SGE addr + len flips bits in upper 32-bits */
1072             if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1073                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1074                     "%s: page boundary crossing (phys=%llx len=%x)\n",
1075                     __func__, sle_phys, sg->length);
1076 
1077                 if (difctx) {
1078                     ha->dif_bundle_crossed_pages++;
1079                     dif_local_dma_alloc = true;
1080                 } else {
1081                     ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1082                         vha, 0xe022,
1083                         "%s: difctx pointer is NULL\n",
1084                         __func__);
1085                 }
1086                 break;
1087             }
1088         }
1089         ha->dif_bundle_writes++;
1090     } else {
1091         ha->dif_bundle_reads++;
1092     }
1093 
1094     if (ql2xdifbundlinginternalbuffers)
1095         dif_local_dma_alloc = direction_to_device;
1096 
1097     if (dif_local_dma_alloc) {
1098         u32 track_difbundl_buf = 0;
1099         u32 ldma_sg_len = 0;
1100         u8 ldma_needed = 1;
1101 
1102         difctx->no_dif_bundl = 0;
1103         difctx->dif_bundl_len = 0;
1104 
1105         /* Track DSD buffers */
1106         INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1107         /* Track local DMA buffers */
1108         INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1109 
1110         for_each_sg(sgl, sg, tot_dsds, i) {
1111             u32 sglen = sg_dma_len(sg);
1112 
1113             ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1114                 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1115                 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1116                 difctx->dif_bundl_len, ldma_needed);
1117 
1118             while (sglen) {
1119                 u32 xfrlen = 0;
1120 
1121                 if (ldma_needed) {
1122                     /*
1123                      * Allocate list item to store
1124                      * the DMA buffers
1125                      */
1126                     dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1127                         GFP_ATOMIC);
1128                     if (!dsd_ptr) {
1129                         ql_dbg(ql_dbg_tgt, vha, 0xe024,
1130                             "%s: failed alloc dsd_ptr\n",
1131                             __func__);
1132                         return 1;
1133                     }
1134                     ha->dif_bundle_kallocs++;
1135 
1136                     /* allocate dma buffer */
1137                     dsd_ptr->dsd_addr = dma_pool_alloc
1138                         (ha->dif_bundl_pool, GFP_ATOMIC,
1139                          &dsd_ptr->dsd_list_dma);
1140                     if (!dsd_ptr->dsd_addr) {
1141                         ql_dbg(ql_dbg_tgt, vha, 0xe024,
1142                             "%s: failed alloc ->dsd_addr\n",
1143                             __func__);
1144                         /*
1145                          * need to cleanup only this
1146                          * dsd_ptr rest will be done
1147                          * by sp_free_dma()
1148                          */
1149                         kfree(dsd_ptr);
1150                         ha->dif_bundle_kallocs--;
1151                         return 1;
1152                     }
1153                     ha->dif_bundle_dma_allocs++;
1154                     ldma_needed = 0;
1155                     difctx->no_dif_bundl++;
1156                     list_add_tail(&dsd_ptr->list,
1157                         &difctx->ldif_dma_hndl_list);
1158                 }
1159 
1160                 /* xfrlen is min of dma pool size and sglen */
1161                 xfrlen = (sglen >
1162                    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1163                     DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1164                     sglen;
1165 
1166                 /* replace with local allocated dma buffer */
1167                 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1168                     dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1169                     difctx->dif_bundl_len);
1170                 difctx->dif_bundl_len += xfrlen;
1171                 sglen -= xfrlen;
1172                 ldma_sg_len += xfrlen;
1173                 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1174                     sg_is_last(sg)) {
1175                     ldma_needed = 1;
1176                     ldma_sg_len = 0;
1177                 }
1178             }
1179         }
1180 
1181         track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1182         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1183             "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1184             difctx->dif_bundl_len, difctx->no_dif_bundl,
1185             track_difbundl_buf);
1186 
1187         if (sp)
1188             sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1189         else
1190             tc->prot_flags = DIF_BUNDL_DMA_VALID;
1191 
1192         list_for_each_entry_safe(dif_dsd, nxt_dsd,
1193             &difctx->ldif_dma_hndl_list, list) {
1194             u32 sglen = (difctx->dif_bundl_len >
1195                 DIF_BUNDLING_DMA_POOL_SIZE) ?
1196                 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1197 
1198             BUG_ON(track_difbundl_buf == 0);
1199 
1200             /* Allocate additional continuation packets? */
1201             if (avail_dsds == 0) {
1202                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1203                     0xe024,
1204                     "%s: adding continuation iocb's\n",
1205                     __func__);
1206                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1207                     QLA_DSDS_PER_IOCB : used_dsds;
1208                 dsd_list_len = (avail_dsds + 1) * 12;
1209                 used_dsds -= avail_dsds;
1210 
1211                 /* allocate tracking DS */
1212                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1213                 if (!dsd_ptr) {
1214                     ql_dbg(ql_dbg_tgt, vha, 0xe026,
1215                         "%s: failed alloc dsd_ptr\n",
1216                         __func__);
1217                     return 1;
1218                 }
1219                 ha->dif_bundle_kallocs++;
1220 
1221                 difctx->no_ldif_dsd++;
1222                 /* allocate new list */
1223                 dsd_ptr->dsd_addr =
1224                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1225                     &dsd_ptr->dsd_list_dma);
1226                 if (!dsd_ptr->dsd_addr) {
1227                     ql_dbg(ql_dbg_tgt, vha, 0xe026,
1228                         "%s: failed alloc ->dsd_addr\n",
1229                         __func__);
1230                     /*
1231                      * need to cleanup only this dsd_ptr
1232                      *  rest will be done by sp_free_dma()
1233                      */
1234                     kfree(dsd_ptr);
1235                     ha->dif_bundle_kallocs--;
1236                     return 1;
1237                 }
1238                 ha->dif_bundle_dma_allocs++;
1239 
1240                 if (sp) {
1241                     list_add_tail(&dsd_ptr->list,
1242                         &difctx->ldif_dsd_list);
1243                     sp->flags |= SRB_CRC_CTX_DSD_VALID;
1244                 } else {
1245                     list_add_tail(&dsd_ptr->list,
1246                         &difctx->ldif_dsd_list);
1247                     tc->ctx_dsd_alloced = 1;
1248                 }
1249 
1250                 /* add new list to cmd iocb or last list */
1251                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1252                            &cur_dsd->address);
1253                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1254                 cur_dsd = dsd_ptr->dsd_addr;
1255             }
1256             put_unaligned_le64(dif_dsd->dsd_list_dma,
1257                        &cur_dsd->address);
1258             cur_dsd->length = cpu_to_le32(sglen);
1259             cur_dsd++;
1260             avail_dsds--;
1261             difctx->dif_bundl_len -= sglen;
1262             track_difbundl_buf--;
1263         }
1264 
1265         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1266             "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1267             difctx->no_ldif_dsd, difctx->no_dif_bundl);
1268     } else {
1269         for_each_sg(sgl, sg, tot_dsds, i) {
1270             /* Allocate additional continuation packets? */
1271             if (avail_dsds == 0) {
1272                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1273                     QLA_DSDS_PER_IOCB : used_dsds;
1274                 dsd_list_len = (avail_dsds + 1) * 12;
1275                 used_dsds -= avail_dsds;
1276 
1277                 /* allocate tracking DS */
1278                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1279                 if (!dsd_ptr) {
1280                     ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1281                         vha, 0xe027,
1282                         "%s: failed alloc dsd_dma...\n",
1283                         __func__);
1284                     return 1;
1285                 }
1286 
1287                 /* allocate new list */
1288                 dsd_ptr->dsd_addr =
1289                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1290                     &dsd_ptr->dsd_list_dma);
1291                 if (!dsd_ptr->dsd_addr) {
1292                     /* need to cleanup only this dsd_ptr */
1293                     /* rest will be done by sp_free_dma() */
1294                     kfree(dsd_ptr);
1295                     return 1;
1296                 }
1297 
1298                 if (sp) {
1299                     list_add_tail(&dsd_ptr->list,
1300                         &difctx->dsd_list);
1301                     sp->flags |= SRB_CRC_CTX_DSD_VALID;
1302                 } else {
1303                     list_add_tail(&dsd_ptr->list,
1304                         &difctx->dsd_list);
1305                     tc->ctx_dsd_alloced = 1;
1306                 }
1307 
1308                 /* add new list to cmd iocb or last list */
1309                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1310                            &cur_dsd->address);
1311                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1312                 cur_dsd = dsd_ptr->dsd_addr;
1313             }
1314             append_dsd64(&cur_dsd, sg);
1315             avail_dsds--;
1316         }
1317     }
1318     /* Null termination */
1319     cur_dsd->address = 0;
1320     cur_dsd->length = 0;
1321     cur_dsd++;
1322     return 0;
1323 }
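/*
 * Sketch of the bundling math, assuming DIF_BUNDLING_DMA_POOL_SIZE is
 * 1024: 3000 bytes of protection data are copied into three local DMA
 * buffers of 1024, 1024 and 952 bytes, difctx->no_dif_bundl becomes 3,
 * and each buffer is later emitted as a single DSD of at most the pool
 * size.
 */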
1324 
1325 /**
1326  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1327  *                          Type CRC_2 IOCB types.
1328  *
1329  * @sp: SRB command to process
1330  * @cmd_pkt: Command Type CRC_2 IOCB
1331  * @tot_dsds: Total number of segments to transfer
1332  * @tot_prot_dsds: Total number of segments with protection information
1333  * @fw_prot_opts: Protection options to be passed to firmware
1334  */
1335 static inline int
1336 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1337     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1338 {
1339     struct dsd64        *cur_dsd;
1340     __be32          *fcp_dl;
1341     scsi_qla_host_t     *vha;
1342     struct scsi_cmnd    *cmd;
1343     uint32_t        total_bytes = 0;
1344     uint32_t        data_bytes;
1345     uint32_t        dif_bytes;
1346     uint8_t         bundling = 1;
1347     uint16_t        blk_size;
1348     struct crc_context  *crc_ctx_pkt = NULL;
1349     struct qla_hw_data  *ha;
1350     uint8_t         additional_fcpcdb_len;
1351     uint16_t        fcp_cmnd_len;
1352     struct fcp_cmnd     *fcp_cmnd;
1353     dma_addr_t      crc_ctx_dma;
1354 
1355     cmd = GET_CMD_SP(sp);
1356 
1357     /* Update entry type to indicate Command Type CRC_2 IOCB */
1358     put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1359 
1360     vha = sp->vha;
1361     ha = vha->hw;
1362 
1363     /* No data transfer */
1364     data_bytes = scsi_bufflen(cmd);
1365     if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1366         cmd_pkt->byte_count = cpu_to_le32(0);
1367         return QLA_SUCCESS;
1368     }
1369 
1370     cmd_pkt->vp_index = sp->vha->vp_idx;
1371 
1372     /* Set transfer direction */
1373     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1374         cmd_pkt->control_flags =
1375             cpu_to_le16(CF_WRITE_DATA);
1376     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1377         cmd_pkt->control_flags =
1378             cpu_to_le16(CF_READ_DATA);
1379     }
1380 
1381     if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1382         (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1383         (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1384         (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1385         bundling = 0;
1386 
1387     /* Allocate CRC context from global pool */
1388     crc_ctx_pkt = sp->u.scmd.crc_ctx =
1389         dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1390 
1391     if (!crc_ctx_pkt)
1392         goto crc_queuing_error;
1393 
1394     crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1395 
1396     sp->flags |= SRB_CRC_CTX_DMA_VALID;
1397 
1398     /* Set handle */
1399     crc_ctx_pkt->handle = cmd_pkt->handle;
1400 
1401     INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1402 
1403     qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1404         &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1405 
1406     put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1407     cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1408 
1409     /* Determine SCSI command length -- align to 4 byte boundary */
1410     if (cmd->cmd_len > 16) {
1411         additional_fcpcdb_len = cmd->cmd_len - 16;
1412         if ((cmd->cmd_len % 4) != 0) {
1413             /* SCSI cmd > 16 bytes must be multiple of 4 */
1414             goto crc_queuing_error;
1415         }
1416         fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1417     } else {
1418         additional_fcpcdb_len = 0;
1419         fcp_cmnd_len = 12 + 16 + 4;
1420     }
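    /*
     * Example: a standard 16-byte CDB gives fcp_cmnd_len = 12 + 16 + 4 =
     * 32, while a 32-byte variable-length CDB gives additional_fcpcdb_len
     * = 16 and fcp_cmnd_len = 12 + 32 + 4 = 48; CDBs longer than 16 bytes
     * must be a multiple of 4 or the request is failed.
     */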
1421 
1422     fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1423 
1424     fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1425     if (cmd->sc_data_direction == DMA_TO_DEVICE)
1426         fcp_cmnd->additional_cdb_len |= 1;
1427     else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1428         fcp_cmnd->additional_cdb_len |= 2;
1429 
1430     int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1431     memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1432     cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1433     put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1434                &cmd_pkt->fcp_cmnd_dseg_address);
1435     fcp_cmnd->task_management = 0;
1436     fcp_cmnd->task_attribute = TSK_SIMPLE;
1437 
1438     cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1439 
1440     /* Compute dif len and adjust data len to include protection */
1441     dif_bytes = 0;
1442     blk_size = cmd->device->sector_size;
1443     dif_bytes = (data_bytes / blk_size) * 8;
1444 
1445     switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1446     case SCSI_PROT_READ_INSERT:
1447     case SCSI_PROT_WRITE_STRIP:
1448         total_bytes = data_bytes;
1449         data_bytes += dif_bytes;
1450         break;
1451 
1452     case SCSI_PROT_READ_STRIP:
1453     case SCSI_PROT_WRITE_INSERT:
1454     case SCSI_PROT_READ_PASS:
1455     case SCSI_PROT_WRITE_PASS:
1456         total_bytes = data_bytes + dif_bytes;
1457         break;
1458     default:
1459         BUG();
1460     }
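    /*
     * Example: data_bytes = 4096 with a 512-byte sector size gives
     * dif_bytes = (4096 / 512) * 8 = 64 bytes of protection data. For a
     * WRITE_PASS, total_bytes becomes 4160 on the wire; for a WRITE_STRIP,
     * total_bytes stays 4096 and data_bytes grows to 4160.
     */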
1461 
1462     if (!qla2x00_hba_err_chk_enabled(sp))
1463         fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1464     /* HBA error checking enabled */
1465     else if (IS_PI_UNINIT_CAPABLE(ha)) {
1466         if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1467             || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1468             SCSI_PROT_DIF_TYPE2))
1469             fw_prot_opts |= BIT_10;
1470         else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1471             SCSI_PROT_DIF_TYPE3)
1472             fw_prot_opts |= BIT_11;
1473     }
1474 
1475     if (!bundling) {
1476         cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1477     } else {
1478         /*
1479          * Configure bundling if we need to fetch interleaved
1480          * protection data with separate PCI accesses.
1481          */
1482         fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1483         crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1484         crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1485                             tot_prot_dsds);
1486         cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1487     }
1488 
1489     /* Finish the common fields of CRC pkt */
1490     crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1491     crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1492     crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1493     crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1494     /* Fibre channel byte count */
1495     cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1496     fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1497         additional_fcpcdb_len);
1498     *fcp_dl = htonl(total_bytes);
1499 
1500     if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1501         cmd_pkt->byte_count = cpu_to_le32(0);
1502         return QLA_SUCCESS;
1503     }
1504     /* Walks data segments */
1505 
1506     cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1507 
1508     if (!bundling && tot_prot_dsds) {
1509         if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1510             cur_dsd, tot_dsds, NULL))
1511             goto crc_queuing_error;
1512     } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1513             (tot_dsds - tot_prot_dsds), NULL))
1514         goto crc_queuing_error;
1515 
1516     if (bundling && tot_prot_dsds) {
1517         /* Walks dif segments */
1518         cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1519         cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1520         if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1521                 tot_prot_dsds, NULL))
1522             goto crc_queuing_error;
1523     }
1524     return QLA_SUCCESS;
1525 
1526 crc_queuing_error:
1527     /* Cleanup will be performed by the caller */
1528 
1529     return QLA_FUNCTION_FAILED;
1530 }
1531 
1532 /**
1533  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1534  * @sp: command to send to the ISP
1535  *
1536  * Returns non-zero if a failure occurred, else zero.
1537  */
1538 int
1539 qla24xx_start_scsi(srb_t *sp)
1540 {
1541     int     nseg;
1542     unsigned long   flags;
1543     uint32_t    *clr_ptr;
1544     uint32_t    handle;
1545     struct cmd_type_7 *cmd_pkt;
1546     uint16_t    cnt;
1547     uint16_t    req_cnt;
1548     uint16_t    tot_dsds;
1549     struct req_que *req = NULL;
1550     struct rsp_que *rsp;
1551     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1552     struct scsi_qla_host *vha = sp->vha;
1553     struct qla_hw_data *ha = vha->hw;
1554 
1555     if (sp->fcport->edif.enable  && (sp->fcport->flags & FCF_FCSP_DEVICE))
1556         return qla28xx_start_scsi_edif(sp);
1557 
1558     /* Setup device pointers. */
1559     req = vha->req;
1560     rsp = req->rsp;
1561 
1562     /* So we know we haven't pci_map'ed anything yet */
1563     tot_dsds = 0;
1564 
1565     /* Send marker if required */
1566     if (vha->marker_needed != 0) {
1567         if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1568             QLA_SUCCESS)
1569             return QLA_FUNCTION_FAILED;
1570         vha->marker_needed = 0;
1571     }
1572 
1573     /* Acquire ring specific lock */
1574     spin_lock_irqsave(&ha->hardware_lock, flags);
1575 
1576     handle = qla2xxx_get_next_handle(req);
1577     if (handle == 0)
1578         goto queuing_error;
1579 
1580     /* Map the sg table so we have an accurate count of sg entries needed */
1581     if (scsi_sg_count(cmd)) {
1582         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1583             scsi_sg_count(cmd), cmd->sc_data_direction);
1584         if (unlikely(!nseg))
1585             goto queuing_error;
1586     } else
1587         nseg = 0;
1588 
1589     tot_dsds = nseg;
1590     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1591 
1592     sp->iores.res_type = RESOURCE_INI;
1593     sp->iores.iocb_cnt = req_cnt;
1594     if (qla_get_iocbs(sp->qpair, &sp->iores))
1595         goto queuing_error;
1596 
1597     if (req->cnt < (req_cnt + 2)) {
1598         if (IS_SHADOW_REG_CAPABLE(ha)) {
1599             cnt = *req->out_ptr;
1600         } else {
1601             cnt = rd_reg_dword_relaxed(req->req_q_out);
1602             if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1603                 goto queuing_error;
1604         }
1605 
1606         if (req->ring_index < cnt)
1607             req->cnt = cnt - req->ring_index;
1608         else
1609             req->cnt = req->length -
1610                 (req->ring_index - cnt);
1611         if (req->cnt < (req_cnt + 2))
1612             goto queuing_error;
1613     }
1614 
1615     /* Build command packet. */
1616     req->current_outstanding_cmd = handle;
1617     req->outstanding_cmds[handle] = sp;
1618     sp->handle = handle;
1619     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1620     req->cnt -= req_cnt;
1621 
1622     cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1623     cmd_pkt->handle = make_handle(req->id, handle);
1624 
1625     /* Zero out remaining portion of packet. */
1626     /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1627     clr_ptr = (uint32_t *)cmd_pkt + 2;
1628     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1629     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1630 
1631     /* Set NPORT-ID and LUN number*/
1632     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1633     cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1634     cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1635     cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1636     cmd_pkt->vp_index = sp->vha->vp_idx;
1637 
1638     int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1639     host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1640 
1641     cmd_pkt->task = TSK_SIMPLE;
1642 
1643     /* Load SCSI command packet. */
1644     memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1645     host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1646 
1647     cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1648 
1649     /* Build IOCB segments */
1650     qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1651 
1652     /* Set total data segment count. */
1653     cmd_pkt->entry_count = (uint8_t)req_cnt;
1654     wmb();
1655     /* Adjust ring index. */
1656     req->ring_index++;
1657     if (req->ring_index == req->length) {
1658         req->ring_index = 0;
1659         req->ring_ptr = req->ring;
1660     } else
1661         req->ring_ptr++;
1662 
1663     sp->qpair->cmd_cnt++;
1664     sp->flags |= SRB_DMA_VALID;
1665 
1666     /* Set chip new ring index. */
1667     wrt_reg_dword(req->req_q_in, req->ring_index);
1668 
1669     /* Manage unprocessed RIO/ZIO commands in response queue. */
1670     if (vha->flags.process_response_queue &&
1671         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1672         qla24xx_process_response_queue(vha, rsp);
1673 
1674     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1675     return QLA_SUCCESS;
1676 
1677 queuing_error:
1678     if (tot_dsds)
1679         scsi_dma_unmap(cmd);
1680 
1681     qla_put_iocbs(sp->qpair, &sp->iores);
1682     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1683 
1684     return QLA_FUNCTION_FAILED;
1685 }
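
/*
 * Editorial sketch, not part of the driver: the free-slot arithmetic used
 * by the queue-space check above. With producer index in_idx, consumer
 * index out_idx and a ring of length entries, the free count is computed
 * as follows (example_ring_free() is a hypothetical helper):
 */
static inline uint16_t example_ring_free(uint16_t in_idx, uint16_t out_idx,
                                         uint16_t length)
{
    if (in_idx < out_idx)
        return out_idx - in_idx;        /* producer behind consumer */
    return length - (in_idx - out_idx); /* producer has wrapped past */
}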
1686 
1687 /**
1688  * qla24xx_dif_start_scsi() - Send a T10-DIF protected SCSI command to the ISP
1689  * @sp: command to send to the ISP
1690  *
1691  * Returns non-zero if a failure occurred, else zero.
1692  */
1693 int
1694 qla24xx_dif_start_scsi(srb_t *sp)
1695 {
1696     int         nseg;
1697     unsigned long       flags;
1698     uint32_t        *clr_ptr;
1699     uint32_t        handle;
1700     uint16_t        cnt;
1701     uint16_t        req_cnt = 0;
1702     uint16_t        tot_dsds;
1703     uint16_t        tot_prot_dsds;
1704     uint16_t        fw_prot_opts = 0;
1705     struct req_que      *req = NULL;
1706     struct rsp_que      *rsp = NULL;
1707     struct scsi_cmnd    *cmd = GET_CMD_SP(sp);
1708     struct scsi_qla_host    *vha = sp->vha;
1709     struct qla_hw_data  *ha = vha->hw;
1710     struct cmd_type_crc_2   *cmd_pkt;
1711     uint32_t        status = 0;
1712 
1713 #define QDSS_GOT_Q_SPACE    BIT_0
1714 
1715     /* Only process protection operations or CDBs longer than 16 bytes here */
1716     if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1717         if (cmd->cmd_len <= 16)
1718             return qla24xx_start_scsi(sp);
1719     }
1720 
1721     /* Setup device pointers. */
1722     req = vha->req;
1723     rsp = req->rsp;
1724 
1725     /* So we know we haven't DMA-mapped anything yet */
1726     tot_dsds = 0;
1727 
1728     /* Send marker if required */
1729     if (vha->marker_needed != 0) {
1730         if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1731             QLA_SUCCESS)
1732             return QLA_FUNCTION_FAILED;
1733         vha->marker_needed = 0;
1734     }
1735 
1736     /* Acquire ring specific lock */
1737     spin_lock_irqsave(&ha->hardware_lock, flags);
1738 
1739     handle = qla2xxx_get_next_handle(req);
1740     if (handle == 0)
1741         goto queuing_error;
1742 
1743     /* Compute number of required data segments */
1744     /* Map the sg table so we have an accurate count of sg entries needed */
1745     if (scsi_sg_count(cmd)) {
1746         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1747             scsi_sg_count(cmd), cmd->sc_data_direction);
1748         if (unlikely(!nseg))
1749             goto queuing_error;
1750         else
1751             sp->flags |= SRB_DMA_VALID;
1752 
1753         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1754             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1755             struct qla2_sgx sgx;
1756             uint32_t    partial;
1757 
1758             memset(&sgx, 0, sizeof(struct qla2_sgx));
1759             sgx.tot_bytes = scsi_bufflen(cmd);
1760             sgx.cur_sg = scsi_sglist(cmd);
1761             sgx.sp = sp;
1762 
1763             nseg = 0;
1764             while (qla24xx_get_one_block_sg(
1765                 cmd->device->sector_size, &sgx, &partial))
1766                 nseg++;
1767         }
1768     } else
1769         nseg = 0;
1770 
1771     /* number of required data segments */
1772     tot_dsds = nseg;
1773 
1774     /* Compute number of required protection segments */
1775     if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1776         nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1777             scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1778         if (unlikely(!nseg))
1779             goto queuing_error;
1780         else
1781             sp->flags |= SRB_CRC_PROT_DMA_VALID;
1782 
1783         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1784             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1785             nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1786         }
1787     } else {
1788         nseg = 0;
1789     }
1790 
1791     req_cnt = 1;
1792     /* Total Data and protection sg segment(s) */
1793     tot_prot_dsds = nseg;
1794     tot_dsds += nseg;
1795 
1796     sp->iores.res_type = RESOURCE_INI;
1797     sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1798     if (qla_get_iocbs(sp->qpair, &sp->iores))
1799         goto queuing_error;
1800 
1801     if (req->cnt < (req_cnt + 2)) {
1802         if (IS_SHADOW_REG_CAPABLE(ha)) {
1803             cnt = *req->out_ptr;
1804         } else {
1805             cnt = rd_reg_dword_relaxed(req->req_q_out);
1806             if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1807                 goto queuing_error;
1808         }
1809         if (req->ring_index < cnt)
1810             req->cnt = cnt - req->ring_index;
1811         else
1812             req->cnt = req->length -
1813                 (req->ring_index - cnt);
1814         if (req->cnt < (req_cnt + 2))
1815             goto queuing_error;
1816     }
1817 
1818     status |= QDSS_GOT_Q_SPACE;
1819 
1820     /* Build header part of command packet (excluding the OPCODE). */
1821     req->current_outstanding_cmd = handle;
1822     req->outstanding_cmds[handle] = sp;
1823     sp->handle = handle;
1824     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1825     req->cnt -= req_cnt;
1826 
1827     /* Fill-in common area */
1828     cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1829     cmd_pkt->handle = make_handle(req->id, handle);
1830 
1831     clr_ptr = (uint32_t *)cmd_pkt + 2;
1832     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1833 
1834     /* Set NPORT-ID and LUN number*/
1835     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1836     cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1837     cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1838     cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1839 
1840     int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1841     host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1842 
1843     /* Total Data and protection segment(s) */
1844     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1845 
1846     /* Build IOCB segments and adjust for data protection segments */
1847     if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1848         req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1849         QLA_SUCCESS)
1850         goto queuing_error;
1851 
1852     cmd_pkt->entry_count = (uint8_t)req_cnt;
1853     /* Specify response queue number where completion should happen */
1854     cmd_pkt->entry_status = (uint8_t) rsp->id;
1855     cmd_pkt->timeout = cpu_to_le16(0);
1856     wmb();
1857 
1858     /* Adjust ring index. */
1859     req->ring_index++;
1860     if (req->ring_index == req->length) {
1861         req->ring_index = 0;
1862         req->ring_ptr = req->ring;
1863     } else
1864         req->ring_ptr++;
1865 
1866     sp->qpair->cmd_cnt++;
1867     /* Set chip new ring index. */
1868     wrt_reg_dword(req->req_q_in, req->ring_index);
1869 
1870     /* Manage unprocessed RIO/ZIO commands in response queue. */
1871     if (vha->flags.process_response_queue &&
1872         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1873         qla24xx_process_response_queue(vha, rsp);
1874 
1875     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1876 
1877     return QLA_SUCCESS;
1878 
1879 queuing_error:
1880     if (status & QDSS_GOT_Q_SPACE) {
1881         req->outstanding_cmds[handle] = NULL;
1882         req->cnt += req_cnt;
1883     }
1884     /* Cleanup will be performed by the caller (queuecommand) */
1885 
1886     qla_put_iocbs(sp->qpair, &sp->iores);
1887     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1888 
1889     return QLA_FUNCTION_FAILED;
1890 }
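
/*
 * Editorial sketch, not part of the driver: for SCSI_PROT_READ_INSERT and
 * SCSI_PROT_WRITE_STRIP the HBA inserts or strips one protection interval
 * per logical block, so the code above collapses the protection segment
 * count to the block count of the transfer. example_prot_segs() is a
 * hypothetical helper.
 */
static inline uint32_t example_prot_segs(uint32_t bufflen,
                                         uint32_t sector_size)
{
    /* One DIF tuple per logical block of the data buffer. */
    return bufflen / sector_size;
}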
1891 
1892 /**
1893  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP on a queue pair
1894  * @sp: command to send to the ISP
1895  *
1896  * Returns non-zero if a failure occurred, else zero.
1897  */
1898 static int
1899 qla2xxx_start_scsi_mq(srb_t *sp)
1900 {
1901     int     nseg;
1902     unsigned long   flags;
1903     uint32_t    *clr_ptr;
1904     uint32_t    handle;
1905     struct cmd_type_7 *cmd_pkt;
1906     uint16_t    cnt;
1907     uint16_t    req_cnt;
1908     uint16_t    tot_dsds;
1909     struct req_que *req = NULL;
1910     struct rsp_que *rsp;
1911     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1912     struct scsi_qla_host *vha = sp->fcport->vha;
1913     struct qla_hw_data *ha = vha->hw;
1914     struct qla_qpair *qpair = sp->qpair;
1915 
1916     if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1917         return qla28xx_start_scsi_edif(sp);
1918 
1919     /* Acquire qpair specific lock */
1920     spin_lock_irqsave(&qpair->qp_lock, flags);
1921 
1922     /* Setup qpair pointers */
1923     req = qpair->req;
1924     rsp = qpair->rsp;
1925 
1926     /* So we know we haven't DMA-mapped anything yet */
1927     tot_dsds = 0;
1928 
1929     /* Send marker if required */
1930     if (vha->marker_needed != 0) {
1931         if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1932             QLA_SUCCESS) {
1933             spin_unlock_irqrestore(&qpair->qp_lock, flags);
1934             return QLA_FUNCTION_FAILED;
1935         }
1936         vha->marker_needed = 0;
1937     }
1938 
1939     handle = qla2xxx_get_next_handle(req);
1940     if (handle == 0)
1941         goto queuing_error;
1942 
1943     /* Map the sg table so we have an accurate count of sg entries needed */
1944     if (scsi_sg_count(cmd)) {
1945         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1946             scsi_sg_count(cmd), cmd->sc_data_direction);
1947         if (unlikely(!nseg))
1948             goto queuing_error;
1949     } else
1950         nseg = 0;
1951 
1952     tot_dsds = nseg;
1953     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1954 
1955     sp->iores.res_type = RESOURCE_INI;
1956     sp->iores.iocb_cnt = req_cnt;
1957     if (qla_get_iocbs(sp->qpair, &sp->iores))
1958         goto queuing_error;
1959 
1960     if (req->cnt < (req_cnt + 2)) {
1961         if (IS_SHADOW_REG_CAPABLE(ha)) {
1962             cnt = *req->out_ptr;
1963         } else {
1964             cnt = rd_reg_dword_relaxed(req->req_q_out);
1965             if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1966                 goto queuing_error;
1967         }
1968 
1969         if (req->ring_index < cnt)
1970             req->cnt = cnt - req->ring_index;
1971         else
1972             req->cnt = req->length -
1973                 (req->ring_index - cnt);
1974         if (req->cnt < (req_cnt + 2))
1975             goto queuing_error;
1976     }
1977 
1978     /* Build command packet. */
1979     req->current_outstanding_cmd = handle;
1980     req->outstanding_cmds[handle] = sp;
1981     sp->handle = handle;
1982     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1983     req->cnt -= req_cnt;
1984 
1985     cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1986     cmd_pkt->handle = make_handle(req->id, handle);
1987 
1988     /* Zero out remaining portion of packet. */
1989     /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1990     clr_ptr = (uint32_t *)cmd_pkt + 2;
1991     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1992     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1993 
1994     /* Set NPORT-ID and LUN number*/
1995     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1996     cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1997     cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1998     cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1999     cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2000 
2001     int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2002     host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2003 
2004     cmd_pkt->task = TSK_SIMPLE;
2005 
2006     /* Load SCSI command packet. */
2007     memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2008     host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2009 
2010     cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2011 
2012     /* Build IOCB segments */
2013     qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2014 
2015     /* Set total data segment count. */
2016     cmd_pkt->entry_count = (uint8_t)req_cnt;
2017     wmb();
2018     /* Adjust ring index. */
2019     req->ring_index++;
2020     if (req->ring_index == req->length) {
2021         req->ring_index = 0;
2022         req->ring_ptr = req->ring;
2023     } else
2024         req->ring_ptr++;
2025 
2026     sp->qpair->cmd_cnt++;
2027     sp->flags |= SRB_DMA_VALID;
2028 
2029     /* Set chip new ring index. */
2030     wrt_reg_dword(req->req_q_in, req->ring_index);
2031 
2032     /* Manage unprocessed RIO/ZIO commands in response queue. */
2033     if (vha->flags.process_response_queue &&
2034         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2035         qla24xx_process_response_queue(vha, rsp);
2036 
2037     spin_unlock_irqrestore(&qpair->qp_lock, flags);
2038     return QLA_SUCCESS;
2039 
2040 queuing_error:
2041     if (tot_dsds)
2042         scsi_dma_unmap(cmd);
2043 
2044     qla_put_iocbs(sp->qpair, &sp->iores);
2045     spin_unlock_irqrestore(&qpair->qp_lock, flags);
2046 
2047     return QLA_FUNCTION_FAILED;
2048 }
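
/*
 * Editorial sketch, not part of the driver, assuming make_handle() packs
 * the request-queue id into the upper 16 bits of the 32-bit IOCB handle;
 * the completion path can then recover both the owning queue and the
 * outstanding_cmds[] slot. example_split_handle() is hypothetical.
 */
static inline void example_split_handle(uint32_t pkt_handle,
                                        uint16_t *que_id, uint16_t *index)
{
    *que_id = pkt_handle >> 16;     /* owning request queue */
    *index = pkt_handle & 0xffff;   /* outstanding_cmds[] slot */
}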
2049 
2050 
2051 /**
2052  * qla2xxx_dif_start_scsi_mq() - Send a T10-DIF protected SCSI command to the ISP on a queue pair
2053  * @sp: command to send to the ISP
2054  *
2055  * Returns non-zero if a failure occurred, else zero.
2056  */
2057 int
2058 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2059 {
2060     int         nseg;
2061     unsigned long       flags;
2062     uint32_t        *clr_ptr;
2063     uint32_t        handle;
2064     uint16_t        cnt;
2065     uint16_t        req_cnt = 0;
2066     uint16_t        tot_dsds;
2067     uint16_t        tot_prot_dsds;
2068     uint16_t        fw_prot_opts = 0;
2069     struct req_que      *req = NULL;
2070     struct rsp_que      *rsp = NULL;
2071     struct scsi_cmnd    *cmd = GET_CMD_SP(sp);
2072     struct scsi_qla_host    *vha = sp->fcport->vha;
2073     struct qla_hw_data  *ha = vha->hw;
2074     struct cmd_type_crc_2   *cmd_pkt;
2075     uint32_t        status = 0;
2076     struct qla_qpair    *qpair = sp->qpair;
2077 
2078 #define QDSS_GOT_Q_SPACE    BIT_0
2079 
2080     /* Check for host side state */
2081     if (!qpair->online) {
2082         cmd->result = DID_NO_CONNECT << 16;
2083         return QLA_INTERFACE_ERROR;
2084     }
2085 
2086     if (!qpair->difdix_supported &&
2087         scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2088         cmd->result = DID_NO_CONNECT << 16;
2089         return QLA_INTERFACE_ERROR;
2090     }
2091 
2092     /* Only process protection operations or CDBs longer than 16 bytes here */
2093     if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2094         if (cmd->cmd_len <= 16)
2095             return qla2xxx_start_scsi_mq(sp);
2096     }
2097 
2098     spin_lock_irqsave(&qpair->qp_lock, flags);
2099 
2100     /* Setup qpair pointers */
2101     rsp = qpair->rsp;
2102     req = qpair->req;
2103 
2104     /* So we know we haven't DMA-mapped anything yet */
2105     tot_dsds = 0;
2106 
2107     /* Send marker if required */
2108     if (vha->marker_needed != 0) {
2109         if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2110             QLA_SUCCESS) {
2111             spin_unlock_irqrestore(&qpair->qp_lock, flags);
2112             return QLA_FUNCTION_FAILED;
2113         }
2114         vha->marker_needed = 0;
2115     }
2116 
2117     handle = qla2xxx_get_next_handle(req);
2118     if (handle == 0)
2119         goto queuing_error;
2120 
2121     /* Compute number of required data segments */
2122     /* Map the sg table so we have an accurate count of sg entries needed */
2123     if (scsi_sg_count(cmd)) {
2124         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2125             scsi_sg_count(cmd), cmd->sc_data_direction);
2126         if (unlikely(!nseg))
2127             goto queuing_error;
2128         else
2129             sp->flags |= SRB_DMA_VALID;
2130 
2131         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2132             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2133             struct qla2_sgx sgx;
2134             uint32_t    partial;
2135 
2136             memset(&sgx, 0, sizeof(struct qla2_sgx));
2137             sgx.tot_bytes = scsi_bufflen(cmd);
2138             sgx.cur_sg = scsi_sglist(cmd);
2139             sgx.sp = sp;
2140 
2141             nseg = 0;
2142             while (qla24xx_get_one_block_sg(
2143                 cmd->device->sector_size, &sgx, &partial))
2144                 nseg++;
2145         }
2146     } else
2147         nseg = 0;
2148 
2149     /* number of required data segments */
2150     tot_dsds = nseg;
2151 
2152     /* Compute number of required protection segments */
2153     if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2154         nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2155             scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2156         if (unlikely(!nseg))
2157             goto queuing_error;
2158         else
2159             sp->flags |= SRB_CRC_PROT_DMA_VALID;
2160 
2161         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2162             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2163             nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2164         }
2165     } else {
2166         nseg = 0;
2167     }
2168 
2169     req_cnt = 1;
2170     /* Total Data and protection sg segment(s) */
2171     tot_prot_dsds = nseg;
2172     tot_dsds += nseg;
2173 
2174     sp->iores.res_type = RESOURCE_INI;
2175     sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2176     if (qla_get_iocbs(sp->qpair, &sp->iores))
2177         goto queuing_error;
2178 
2179     if (req->cnt < (req_cnt + 2)) {
2180         if (IS_SHADOW_REG_CAPABLE(ha)) {
2181             cnt = *req->out_ptr;
2182         } else {
2183             cnt = rd_reg_dword_relaxed(req->req_q_out);
2184             if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2185                 goto queuing_error;
2186         }
2187 
2188         if (req->ring_index < cnt)
2189             req->cnt = cnt - req->ring_index;
2190         else
2191             req->cnt = req->length -
2192                 (req->ring_index - cnt);
2193         if (req->cnt < (req_cnt + 2))
2194             goto queuing_error;
2195     }
2196 
2197     status |= QDSS_GOT_Q_SPACE;
2198 
2199     /* Build header part of command packet (excluding the OPCODE). */
2200     req->current_outstanding_cmd = handle;
2201     req->outstanding_cmds[handle] = sp;
2202     sp->handle = handle;
2203     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2204     req->cnt -= req_cnt;
2205 
2206     /* Fill-in common area */
2207     cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2208     cmd_pkt->handle = make_handle(req->id, handle);
2209 
2210     clr_ptr = (uint32_t *)cmd_pkt + 2;
2211     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2212 
2213     /* Set NPORT-ID and LUN number*/
2214     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2215     cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2216     cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2217     cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2218 
2219     int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2220     host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2221 
2222     /* Total Data and protection segment(s) */
2223     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2224 
2225     /* Build IOCB segments and adjust for data protection segments */
2226     if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2227         req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2228         QLA_SUCCESS)
2229         goto queuing_error;
2230 
2231     cmd_pkt->entry_count = (uint8_t)req_cnt;
2232     cmd_pkt->timeout = cpu_to_le16(0);
2233     wmb();
2234 
2235     /* Adjust ring index. */
2236     req->ring_index++;
2237     if (req->ring_index == req->length) {
2238         req->ring_index = 0;
2239         req->ring_ptr = req->ring;
2240     } else
2241         req->ring_ptr++;
2242 
2243     sp->qpair->cmd_cnt++;
2244     /* Set chip new ring index. */
2245     wrt_reg_dword(req->req_q_in, req->ring_index);
2246 
2247     /* Manage unprocessed RIO/ZIO commands in response queue. */
2248     if (vha->flags.process_response_queue &&
2249         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2250         qla24xx_process_response_queue(vha, rsp);
2251 
2252     spin_unlock_irqrestore(&qpair->qp_lock, flags);
2253 
2254     return QLA_SUCCESS;
2255 
2256 queuing_error:
2257     if (status & QDSS_GOT_Q_SPACE) {
2258         req->outstanding_cmds[handle] = NULL;
2259         req->cnt += req_cnt;
2260     }
2261     /* Cleanup will be performed by the caller (queuecommand) */
2262 
2263     qla_put_iocbs(sp->qpair, &sp->iores);
2264     spin_unlock_irqrestore(&qpair->qp_lock, flags);
2265 
2266     return QLA_FUNCTION_FAILED;
2267 }
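
/*
 * Editorial sketch, not part of the driver: the early-out paths above
 * report DID_NO_CONNECT by shifting the host byte into bits 16-23 of
 * cmd->result, per the midlayer's result encoding. example_scsi_result()
 * is a hypothetical helper.
 */
static inline uint32_t example_scsi_result(uint8_t host_byte,
                                           uint8_t status_byte)
{
    return ((uint32_t)host_byte << 16) | status_byte;
}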
2268 
2269 /* Generic Control-SRB manipulation functions. */
2270 
2271 /* hardware_lock assumed to be held. */
2272 
2273 void *
2274 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2275 {
2276     scsi_qla_host_t *vha = qpair->vha;
2277     struct qla_hw_data *ha = vha->hw;
2278     struct req_que *req = qpair->req;
2279     device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2280     uint32_t handle;
2281     request_t *pkt;
2282     uint16_t cnt, req_cnt;
2283 
2284     pkt = NULL;
2285     req_cnt = 1;
2286     handle = 0;
2287 
2288     if (sp && (sp->type != SRB_SCSI_CMD)) {
2289         /* Adjust entry-counts as needed. */
2290         req_cnt = sp->iocbs;
2291     }
2292 
2293     /* Check for room on request queue. */
2294     if (req->cnt < req_cnt + 2) {
2295         if (qpair->use_shadow_reg)
2296             cnt = *req->out_ptr;
2297         else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2298             IS_QLA28XX(ha))
2299             cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2300         else if (IS_P3P_TYPE(ha))
2301             cnt = rd_reg_dword(reg->isp82.req_q_out);
2302         else if (IS_FWI2_CAPABLE(ha))
2303             cnt = rd_reg_dword(&reg->isp24.req_q_out);
2304         else if (IS_QLAFX00(ha))
2305             cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2306         else
2307             cnt = qla2x00_debounce_register(
2308                 ISP_REQ_Q_OUT(ha, &reg->isp));
2309 
2310         if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2311             qla_schedule_eeh_work(vha);
2312             return NULL;
2313         }
2314 
2315         if (req->ring_index < cnt)
2316             req->cnt = cnt - req->ring_index;
2317         else
2318             req->cnt = req->length -
2319                 (req->ring_index - cnt);
2320     }
2321     if (req->cnt < req_cnt + 2)
2322         goto queuing_error;
2323 
2324     if (sp) {
2325         handle = qla2xxx_get_next_handle(req);
2326         if (handle == 0) {
2327             ql_log(ql_log_warn, vha, 0x700b,
2328                 "No room on outstanding cmd array.\n");
2329             goto queuing_error;
2330         }
2331 
2332         /* Prep command array. */
2333         req->current_outstanding_cmd = handle;
2334         req->outstanding_cmds[handle] = sp;
2335         sp->handle = handle;
2336     }
2337 
2338     /* Prep packet */
2339     req->cnt -= req_cnt;
2340     pkt = req->ring_ptr;
2341     memset(pkt, 0, REQUEST_ENTRY_SIZE);
2342     if (IS_QLAFX00(ha)) {
2343         wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2344         wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2345     } else {
2346         pkt->entry_count = req_cnt;
2347         pkt->handle = handle;
2348     }
2349 
2350     return pkt;
2351 
2352 queuing_error:
2353     qpair->tgt_counters.num_alloc_iocb_failed++;
2354     return pkt;
2355 }
2356 
2357 void *
2358 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2359 {
2360     scsi_qla_host_t *vha = qpair->vha;
2361 
2362     if (qla2x00_reset_active(vha))
2363         return NULL;
2364 
2365     return __qla2x00_alloc_iocbs(qpair, sp);
2366 }
2367 
2368 void *
2369 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2370 {
2371     return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2372 }
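
/*
 * Editorial usage sketch, not part of the driver: control-SRB senders
 * typically allocate an IOCB with the helpers above and treat NULL as
 * "ring full or reset active". example_send_ctrl_iocb() is hypothetical.
 */
static int example_send_ctrl_iocb(struct qla_qpair *qpair, srb_t *sp)
{
    request_t *pkt = qla2x00_alloc_iocbs_ready(qpair, sp);

    if (!pkt)
        return QLA_FUNCTION_FAILED; /* no ring space or reset active */

    /* ... fill in the type-specific fields of *pkt, then submit via
     * the caller's normal doorbell path ... */
    return QLA_SUCCESS;
}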
2373 
2374 static void
2375 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2376 {
2377     struct srb_iocb *lio = &sp->u.iocb_cmd;
2378 
2379     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2380     logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2381     if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2382         logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2383         if (sp->vha->flags.nvme_first_burst)
2384             logio->io_parameter[0] =
2385                 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2386         if (sp->vha->flags.nvme2_enabled) {
2387             /* Set service parameter BIT_7 for NVME CONF support */
2388             logio->io_parameter[0] |=
2389                 cpu_to_le32(NVME_PRLI_SP_CONF);
2390             /* Set service parameter BIT_8 for SLER support */
2391             logio->io_parameter[0] |=
2392                 cpu_to_le32(NVME_PRLI_SP_SLER);
2393             /* Set service parameter BIT_9 for PI control support */
2394             logio->io_parameter[0] |=
2395                 cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2396         }
2397     }
2398 
2399     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2400     logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2401     logio->port_id[1] = sp->fcport->d_id.b.area;
2402     logio->port_id[2] = sp->fcport->d_id.b.domain;
2403     logio->vp_index = sp->vha->vp_idx;
2404 }
2405 
2406 static void
2407 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2408 {
2409     struct srb_iocb *lio = &sp->u.iocb_cmd;
2410 
2411     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2412     logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2413 
2414     if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2415         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2416     } else {
2417         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2418         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2419             logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2420         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2421             logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2422         if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
2423             logio->control_flags |=
2424                 cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
2425             logio->io_parameter[0] =
2426                 cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
2427         }
2428     }
2429     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2430     logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2431     logio->port_id[1] = sp->fcport->d_id.b.area;
2432     logio->port_id[2] = sp->fcport->d_id.b.domain;
2433     logio->vp_index = sp->vha->vp_idx;
2434 }
2435 
2436 static void
2437 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2438 {
2439     struct qla_hw_data *ha = sp->vha->hw;
2440     struct srb_iocb *lio = &sp->u.iocb_cmd;
2441     uint16_t opts;
2442 
2443     mbx->entry_type = MBX_IOCB_TYPE;
2444     SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2445     mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2446     opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2447     opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2448     if (HAS_EXTENDED_IDS(ha)) {
2449         mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2450         mbx->mb10 = cpu_to_le16(opts);
2451     } else {
2452         mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2453     }
2454     mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2455     mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2456         sp->fcport->d_id.b.al_pa);
2457     mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2458 }
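
/*
 * Editorial sketch, not part of the driver: without extended IDs the loop
 * id and the option bits share mailbox 1, loop id in the high byte, as in
 * the else-branch above. example_pack_mb1() is a hypothetical helper.
 */
static inline uint16_t example_pack_mb1(uint8_t loop_id, uint8_t opts)
{
    return ((uint16_t)loop_id << 8) | opts;
}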
2459 
2460 static void
2461 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2462 {
2463     u16 control_flags = LCF_COMMAND_LOGO;
2464     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2465 
2466     if (sp->fcport->explicit_logout) {
2467         control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2468     } else {
2469         control_flags |= LCF_IMPL_LOGO;
2470 
2471         if (!sp->fcport->keep_nport_handle)
2472             control_flags |= LCF_FREE_NPORT;
2473     }
2474 
2475     logio->control_flags = cpu_to_le16(control_flags);
2476     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2477     logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2478     logio->port_id[1] = sp->fcport->d_id.b.area;
2479     logio->port_id[2] = sp->fcport->d_id.b.domain;
2480     logio->vp_index = sp->vha->vp_idx;
2481 }
2482 
2483 static void
2484 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2485 {
2486     struct qla_hw_data *ha = sp->vha->hw;
2487 
2488     mbx->entry_type = MBX_IOCB_TYPE;
2489     SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2490     mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2491     mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2492         cpu_to_le16(sp->fcport->loop_id) :
2493         cpu_to_le16(sp->fcport->loop_id << 8);
2494     mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2495     mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2496         sp->fcport->d_id.b.al_pa);
2497     mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2498     /* Implicit: mbx->mb10 = 0. */
2499 }
2500 
2501 static void
2502 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2503 {
2504     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2505     logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2506     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2507     logio->vp_index = sp->vha->vp_idx;
2508 }
2509 
2510 static void
2511 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2512 {
2513     struct qla_hw_data *ha = sp->vha->hw;
2514 
2515     mbx->entry_type = MBX_IOCB_TYPE;
2516     SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2517     mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2518     if (HAS_EXTENDED_IDS(ha)) {
2519         mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2520         mbx->mb10 = cpu_to_le16(BIT_0);
2521     } else {
2522         mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2523     }
2524     mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2525     mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2526     mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2527     mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2528     mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2529 }
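
/*
 * Editorial sketch, not part of the driver: the LSW/MSW/MSD macros above
 * split the 64-bit async_pd_dma address across four 16-bit mailboxes.
 * example_split_dma64() is a hypothetical helper showing the bit ranges.
 */
static inline void example_split_dma64(uint64_t addr, uint16_t mb[4])
{
    mb[0] = addr & 0xffff;          /* LSW(addr)      -> mb3 */
    mb[1] = (addr >> 16) & 0xffff;  /* MSW(addr)      -> mb2 */
    mb[2] = (addr >> 32) & 0xffff;  /* LSW(MSD(addr)) -> mb7 */
    mb[3] = (addr >> 48) & 0xffff;  /* MSW(MSD(addr)) -> mb6 */
}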
2530 
2531 static void
2532 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2533 {
2534     uint32_t flags;
2535     uint64_t lun;
2536     struct fc_port *fcport = sp->fcport;
2537     scsi_qla_host_t *vha = fcport->vha;
2538     struct qla_hw_data *ha = vha->hw;
2539     struct srb_iocb *iocb = &sp->u.iocb_cmd;
2540     struct req_que *req = vha->req;
2541 
2542     flags = iocb->u.tmf.flags;
2543     lun = iocb->u.tmf.lun;
2544 
2545     tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2546     tsk->entry_count = 1;
2547     tsk->handle = make_handle(req->id, tsk->handle);
2548     tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2549     tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2550     tsk->control_flags = cpu_to_le32(flags);
2551     tsk->port_id[0] = fcport->d_id.b.al_pa;
2552     tsk->port_id[1] = fcport->d_id.b.area;
2553     tsk->port_id[2] = fcport->d_id.b.domain;
2554     tsk->vp_index = fcport->vha->vp_idx;
2555 
2556     if (flags == TCF_LUN_RESET) {
2557         int_to_scsilun(lun, &tsk->lun);
2558         host_to_fcp_swap((uint8_t *)&tsk->lun,
2559             sizeof(tsk->lun));
2560     }
2561 }
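
/*
 * Editorial sketch, not part of the driver: for LUNs below 256,
 * int_to_scsilun() produces SAM peripheral addressing with the LUN in the
 * second byte of the 8-byte field; host_to_fcp_swap() then reorders it
 * for the wire. example_encode_small_lun() is a hypothetical helper.
 */
static inline void example_encode_small_lun(uint8_t lun8[8], uint8_t lun)
{
    memset(lun8, 0, 8);
    lun8[1] = lun;  /* peripheral device addressing, LUN < 256 */
}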
2562 
2563 static void
2564 qla2x00_async_done(struct srb *sp, int res)
2565 {
2566     if (del_timer(&sp->u.iocb_cmd.timer)) {
2567         /*
2568          * Successfully cancelled the timeout handler
2569          * ref: TMR
2570          */
2571         if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
2572             return;
2573     }
2574     sp->async_done(sp, res);
2575 }
2576 
2577 void
2578 qla2x00_sp_release(struct kref *kref)
2579 {
2580     struct srb *sp = container_of(kref, struct srb, cmd_kref);
2581 
2582     sp->free(sp);
2583 }
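
/*
 * Editorial usage sketch, not part of the driver: SRB lifetime is
 * kref-managed, so droppers pair each reference with qla2x00_sp_release()
 * as the release callback. example_drop_sp_ref() is hypothetical.
 */
static inline void example_drop_sp_ref(srb_t *sp)
{
    /* Frees the SRB via sp->free() once the count reaches zero. */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
}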
2584 
2585 void
2586 qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
2587              void (*done)(struct srb *sp, int res))
2588 {
2589     timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2590     sp->done = qla2x00_async_done;
2591     sp->async_done = done;
2592     sp->free = qla2x00_sp_free;
2593     sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2594     sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2595     if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2596         init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2597     sp->start_timer = 1;
2598 }
2599 
2600 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2601 {
2602     struct srb_iocb *elsio = &sp->u.iocb_cmd;
2603 
2604     kfree(sp->fcport);
2605 
2606     if (elsio->u.els_logo.els_logo_pyld)
2607         dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2608             elsio->u.els_logo.els_logo_pyld,
2609             elsio->u.els_logo.els_logo_pyld_dma);
2610 
2611     del_timer(&elsio->timer);
2612     qla2x00_rel_sp(sp);
2613 }
2614 
2615 static void
2616 qla2x00_els_dcmd_iocb_timeout(void *data)
2617 {
2618     srb_t *sp = data;
2619     fc_port_t *fcport = sp->fcport;
2620     struct scsi_qla_host *vha = sp->vha;
2621     struct srb_iocb *lio = &sp->u.iocb_cmd;
2622     unsigned long flags = 0;
2623     int res, h;
2624 
2625     ql_dbg(ql_dbg_io, vha, 0x3069,
2626         "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2627         sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2628         fcport->d_id.b.al_pa);
2629 
2630     /* Abort the exchange */
2631     res = qla24xx_async_abort_cmd(sp, false);
2632     if (res) {
2633         ql_dbg(ql_dbg_io, vha, 0x3070,
2634             "mbx abort_command failed.\n");
2635         spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2636         for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2637             if (sp->qpair->req->outstanding_cmds[h] == sp) {
2638                 sp->qpair->req->outstanding_cmds[h] = NULL;
2639                 break;
2640             }
2641         }
2642         spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2643         complete(&lio->u.els_logo.comp);
2644     } else {
2645         ql_dbg(ql_dbg_io, vha, 0x3071,
2646             "mbx abort_command success.\n");
2647     }
2648 }
2649 
2650 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2651 {
2652     fc_port_t *fcport = sp->fcport;
2653     struct srb_iocb *lio = &sp->u.iocb_cmd;
2654     struct scsi_qla_host *vha = sp->vha;
2655 
2656     ql_dbg(ql_dbg_io, vha, 0x3072,
2657         "%s hdl=%x, portid=%02x%02x%02x done\n",
2658         sp->name, sp->handle, fcport->d_id.b.domain,
2659         fcport->d_id.b.area, fcport->d_id.b.al_pa);
2660 
2661     complete(&lio->u.els_logo.comp);
2662 }
2663 
2664 int
2665 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2666     port_id_t remote_did)
2667 {
2668     srb_t *sp;
2669     fc_port_t *fcport = NULL;
2670     struct srb_iocb *elsio = NULL;
2671     struct qla_hw_data *ha = vha->hw;
2672     struct els_logo_payload logo_pyld;
2673     int rval = QLA_SUCCESS;
2674 
2675     fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2676     if (!fcport) {
2677         ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2678         return -ENOMEM;
2679     }
2680 
2681     /* Alloc SRB structure
2682      * ref: INIT
2683      */
2684     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2685     if (!sp) {
2686         kfree(fcport);
2687         ql_log(ql_log_info, vha, 0x70e6,
2688             "SRB allocation failed\n");
2689         return -ENOMEM;
2690     }
2691 
2692     elsio = &sp->u.iocb_cmd;
2693     fcport->loop_id = 0xFFFF;
2694     fcport->d_id.b.domain = remote_did.b.domain;
2695     fcport->d_id.b.area = remote_did.b.area;
2696     fcport->d_id.b.al_pa = remote_did.b.al_pa;
2697 
2698     ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2699         fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2700 
2701     sp->type = SRB_ELS_DCMD;
2702     sp->name = "ELS_DCMD";
2703     sp->fcport = fcport;
2704     qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
2705                   qla2x00_els_dcmd_sp_done);
2706     sp->free = qla2x00_els_dcmd_sp_free;
2707     sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
2708     init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2709 
2710     elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2711                 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2712                 GFP_KERNEL);
2713 
2714     if (!elsio->u.els_logo.els_logo_pyld) {
2715         /* ref: INIT */
2716         kref_put(&sp->cmd_kref, qla2x00_sp_release);
2717         return QLA_FUNCTION_FAILED;
2718     }
2719 
2720     memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2721 
2722     elsio->u.els_logo.els_cmd = els_opcode;
2723     logo_pyld.opcode = els_opcode;
2724     logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2725     logo_pyld.s_id[1] = vha->d_id.b.area;
2726     logo_pyld.s_id[2] = vha->d_id.b.domain;
2727     host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2728     memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2729 
2730     memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2731         sizeof(struct els_logo_payload));
2732     ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2733     ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2734                elsio->u.els_logo.els_logo_pyld,
2735                sizeof(*elsio->u.els_logo.els_logo_pyld));
2736 
2737     rval = qla2x00_start_sp(sp);
2738     if (rval != QLA_SUCCESS) {
2739         /* ref: INIT */
2740         kref_put(&sp->cmd_kref, qla2x00_sp_release);
2741         return QLA_FUNCTION_FAILED;
2742     }
2743 
2744     ql_dbg(ql_dbg_io, vha, 0x3074,
2745         "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2746         sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2747         fcport->d_id.b.area, fcport->d_id.b.al_pa);
2748 
2749     wait_for_completion(&elsio->u.els_logo.comp);
2750 
2751     /* ref: INIT */
2752     kref_put(&sp->cmd_kref, qla2x00_sp_release);
2753     return rval;
2754 }
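
/*
 * Editorial sketch, not part of the driver, assuming host_to_fcp_swap()
 * byte-swaps each 32-bit word of the buffer in place (as its use on the
 * CDB, LUN and S_ID fields suggests). example_swab32_buf() is a
 * hypothetical equivalent.
 */
static inline void example_swab32_buf(uint8_t *buf, uint32_t len)
{
    uint32_t i;

    /* Reverse each aligned 4-byte group in place. */
    for (i = 0; i + 4 <= len; i += 4) {
        uint8_t b0 = buf[i], b1 = buf[i + 1];

        buf[i] = buf[i + 3];
        buf[i + 1] = buf[i + 2];
        buf[i + 2] = b1;
        buf[i + 3] = b0;
    }
}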
2755 
2756 static void
2757 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2758 {
2759     scsi_qla_host_t *vha = sp->vha;
2760     struct srb_iocb *elsio = &sp->u.iocb_cmd;
2761 
2762     els_iocb->entry_type = ELS_IOCB_TYPE;
2763     els_iocb->entry_count = 1;
2764     els_iocb->sys_define = 0;
2765     els_iocb->entry_status = 0;
2766     els_iocb->handle = sp->handle;
2767     els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2768     els_iocb->tx_dsd_count = cpu_to_le16(1);
2769     els_iocb->vp_index = vha->vp_idx;
2770     els_iocb->sof_type = EST_SOFI3;
2771     els_iocb->rx_dsd_count = 0;
2772     els_iocb->opcode = elsio->u.els_logo.els_cmd;
2773 
2774     els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2775     els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2776     els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2777     /* For the S_ID the byte order differs from the D_ID */
2778     els_iocb->s_id[1] = vha->d_id.b.al_pa;
2779     els_iocb->s_id[2] = vha->d_id.b.area;
2780     els_iocb->s_id[0] = vha->d_id.b.domain;
2781 
2782     if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2783         if (vha->hw->flags.edif_enabled)
2784             els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
2785         else
2786             els_iocb->control_flags = 0;
2787         els_iocb->tx_byte_count = els_iocb->tx_len =
2788             cpu_to_le32(sizeof(struct els_plogi_payload));
2789         put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2790                    &els_iocb->tx_address);
2791         els_iocb->rx_dsd_count = cpu_to_le16(1);
2792         els_iocb->rx_byte_count = els_iocb->rx_len =
2793             cpu_to_le32(sizeof(struct els_plogi_payload));
2794         put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2795                    &els_iocb->rx_address);
2796 
2797         ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2798             "PLOGI ELS IOCB:\n");
2799         ql_dump_buffer(ql_log_info, vha, 0x0109,
2800             (uint8_t *)els_iocb,
2801             sizeof(*els_iocb));
2802     } else {
2803         els_iocb->tx_byte_count =
2804             cpu_to_le32(sizeof(struct els_logo_payload));
2805         put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2806                    &els_iocb->tx_address);
2807         els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2808 
2809         els_iocb->rx_byte_count = 0;
2810         els_iocb->rx_address = 0;
2811         els_iocb->rx_len = 0;
2812         ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2813                "LOGO ELS IOCB:");
2814         ql_dump_buffer(ql_log_info, vha, 0x010b,
2815                    els_iocb,
2816                    sizeof(*els_iocb));
2817     }
2818 
2819     sp->vha->qla_stats.control_requests++;
2820 }
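
/*
 * Editorial sketch, not part of the driver: the two 24-bit port-id byte
 * layouts used above. The D_ID is stored AL_PA first; the S_ID uses a
 * different byte order, as the in-line comment notes. example_fill_ids()
 * is a hypothetical helper.
 */
static inline void example_fill_ids(uint8_t d_id[3], uint8_t s_id[3],
                                    uint8_t domain, uint8_t area,
                                    uint8_t al_pa)
{
    d_id[0] = al_pa;
    d_id[1] = area;
    d_id[2] = domain;

    s_id[0] = domain;
    s_id[1] = al_pa;
    s_id[2] = area;
}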
2821 
2822 void
2823 qla2x00_els_dcmd2_iocb_timeout(void *data)
2824 {
2825     srb_t *sp = data;
2826     fc_port_t *fcport = sp->fcport;
2827     struct scsi_qla_host *vha = sp->vha;
2828     unsigned long flags = 0;
2829     int res, h;
2830 
2831     ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2832         "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2833         sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2834 
2835     /* Abort the exchange */
2836     res = qla24xx_async_abort_cmd(sp, false);
2837     ql_dbg(ql_dbg_io, vha, 0x3070,
2838         "mbx abort_command %s\n",
2839         (res == QLA_SUCCESS) ? "successful" : "failed");
2840     if (res) {
2841         spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2842         for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2843             if (sp->qpair->req->outstanding_cmds[h] == sp) {
2844                 sp->qpair->req->outstanding_cmds[h] = NULL;
2845                 break;
2846             }
2847         }
2848         spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2849         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2850     }
2851 }
2852 
2853 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2854 {
2855     if (els_plogi->els_plogi_pyld)
2856         dma_free_coherent(&vha->hw->pdev->dev,
2857                   els_plogi->tx_size,
2858                   els_plogi->els_plogi_pyld,
2859                   els_plogi->els_plogi_pyld_dma);
2860 
2861     if (els_plogi->els_resp_pyld)
2862         dma_free_coherent(&vha->hw->pdev->dev,
2863                   els_plogi->rx_size,
2864                   els_plogi->els_resp_pyld,
2865                   els_plogi->els_resp_pyld_dma);
2866 }
2867 
2868 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2869 {
2870     fc_port_t *fcport = sp->fcport;
2871     struct srb_iocb *lio = &sp->u.iocb_cmd;
2872     struct scsi_qla_host *vha = sp->vha;
2873     struct event_arg ea;
2874     struct qla_work_evt *e;
2875     struct fc_port *conflict_fcport;
2876     port_id_t cid;  /* conflicting N_Port id */
2877     const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2878     u16 lid;
2879 
2880     ql_dbg(ql_dbg_disc, vha, 0x3072,
2881         "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2882         sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2883 
2884     fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2885     /* For EDIF, set logout_on_delete to ensure any residual key from FW is flushed. */
2886     fcport->logout_on_delete = 1;
2887     fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2888 
2889     if (sp->flags & SRB_WAKEUP_ON_COMP)
2890         complete(&lio->u.els_plogi.comp);
2891     else {
2892         switch (le32_to_cpu(fw_status[0])) {
2893         case CS_DATA_UNDERRUN:
2894         case CS_COMPLETE:
2895             memset(&ea, 0, sizeof(ea));
2896             ea.fcport = fcport;
2897             ea.rc = res;
2898             qla_handle_els_plogi_done(vha, &ea);
2899             break;
2900 
2901         case CS_IOCB_ERROR:
2902             switch (le32_to_cpu(fw_status[1])) {
2903             case LSC_SCODE_PORTID_USED:
2904                 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2905                 qlt_find_sess_invalidate_other(vha,
2906                     wwn_to_u64(fcport->port_name),
2907                     fcport->d_id, lid, &conflict_fcport);
2908                 if (conflict_fcport) {
2909                     /*
2910                      * Another fcport shares the same
2911                      * loop_id & nport id; conflict
2912                      * fcport needs to finish cleanup
2913                      * before this fcport can proceed
2914                      * to login.
2915                      */
2916                     conflict_fcport->conflict = fcport;
2917                     fcport->login_pause = 1;
2918                     ql_dbg(ql_dbg_disc, vha, 0x20ed,
2919                         "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2920                         __func__, __LINE__,
2921                         fcport->port_name,
2922                         fcport->d_id.b24, lid);
2923                 } else {
2924                     ql_dbg(ql_dbg_disc, vha, 0x20ed,
2925                         "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2926                         __func__, __LINE__,
2927                         fcport->port_name,
2928                         fcport->d_id.b24, lid);
2929                     qla2x00_clear_loop_id(fcport);
2930                     set_bit(lid, vha->hw->loop_id_map);
2931                     fcport->loop_id = lid;
2932                     fcport->keep_nport_handle = 0;
2933                     qlt_schedule_sess_for_deletion(fcport);
2934                 }
2935                 break;
2936 
2937             case LSC_SCODE_NPORT_USED:
2938                 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2939                     & 0xff;
2940                 cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2941                     & 0xff;
2942                 cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2943                 cid.b.rsvd_1 = 0;
2944 
2945                 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2946                     "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2947                     __func__, __LINE__, fcport->port_name,
2948                     fcport->loop_id, cid.b24);
2949                 set_bit(fcport->loop_id,
2950                     vha->hw->loop_id_map);
2951                 fcport->loop_id = FC_NO_LOOP_ID;
2952                 qla24xx_post_gnl_work(vha, fcport);
2953                 break;
2954 
2955             case LSC_SCODE_NOXCB:
2956                 vha->hw->exch_starvation++;
2957                 if (vha->hw->exch_starvation > 5) {
2958                     ql_log(ql_log_warn, vha, 0xd046,
2959                         "Exchange starvation. Resetting RISC\n");
2960                     vha->hw->exch_starvation = 0;
2961                     set_bit(ISP_ABORT_NEEDED,
2962                         &vha->dpc_flags);
2963                     qla2xxx_wake_dpc(vha);
2964                     break;
2965                 }
2966                 fallthrough;
2967             default:
2968                 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2969                     "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2970                     __func__, sp->fcport->port_name,
2971                     fw_status[0], fw_status[1], fw_status[2]);
2972 
2973                 fcport->flags &= ~FCF_ASYNC_SENT;
2974                 qlt_schedule_sess_for_deletion(fcport);
2975                 break;
2976             }
2977             break;
2978 
2979         default:
2980             ql_dbg(ql_dbg_disc, vha, 0x20eb,
2981                 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2982                 __func__, sp->fcport->port_name,
2983                 fw_status[0], fw_status[1], fw_status[2]);
2984 
2985             sp->fcport->flags &= ~FCF_ASYNC_SENT;
2986             qlt_schedule_sess_for_deletion(fcport);
2987             break;
2988         }
2989 
2990         e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2991         if (!e) {
2992             struct srb_iocb *elsio = &sp->u.iocb_cmd;
2993 
2994             qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2995             /* ref: INIT */
2996             kref_put(&sp->cmd_kref, qla2x00_sp_release);
2997             return;
2998         }
2999         e->u.iosb.sp = sp;
3000         qla2x00_post_work(vha, e);
3001     }
3002 }
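
/*
 * Editorial sketch, not part of the driver: how the LSC_SCODE_NPORT_USED
 * arm above unpacks the conflicting 24-bit port id from fw_status[2].
 * example_decode_nport() is a hypothetical helper.
 */
static inline void example_decode_nport(uint32_t fw2, uint8_t *domain,
                                        uint8_t *area, uint8_t *al_pa)
{
    *domain = (fw2 >> 16) & 0xff;
    *area = (fw2 >> 8) & 0xff;
    *al_pa = fw2 & 0xff;
}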
3003 
3004 int
3005 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3006     fc_port_t *fcport, bool wait)
3007 {
3008     srb_t *sp;
3009     struct srb_iocb *elsio = NULL;
3010     struct qla_hw_data *ha = vha->hw;
3011     int rval = QLA_SUCCESS;
3012     void    *ptr, *resp_ptr;
3013 
3014     /* Alloc SRB structure
3015      * ref: INIT
3016      */
3017     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3018     if (!sp) {
3019         ql_log(ql_log_info, vha, 0x70e6,
3020             "SRB allocation failed\n");
3021         fcport->flags &= ~FCF_ASYNC_ACTIVE;
3022         return -ENOMEM;
3023     }
3024 
3025     fcport->flags |= FCF_ASYNC_SENT;
3026     qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3027     elsio = &sp->u.iocb_cmd;
3028     ql_dbg(ql_dbg_io, vha, 0x3073,
3029            "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
3030 
3031     if (wait)
3032         sp->flags = SRB_WAKEUP_ON_COMP;
3033 
3034     sp->type = SRB_ELS_DCMD;
3035     sp->name = "ELS_DCMD";
3036     sp->fcport = fcport;
3037     qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
3038                  qla2x00_els_dcmd2_sp_done);
3039     sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
3040 
3041     elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3042 
3043     ptr = elsio->u.els_plogi.els_plogi_pyld =
3044         dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3045         &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3046 
3047     if (!elsio->u.els_plogi.els_plogi_pyld) {
3048         rval = QLA_FUNCTION_FAILED;
3049         goto out;
3050     }
3051 
3052     resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3053         dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3054         &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3055 
3056     if (!elsio->u.els_plogi.els_resp_pyld) {
3057         rval = QLA_FUNCTION_FAILED;
3058         goto out;
3059     }
3060 
3061     ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3062 
3063     memset(ptr, 0, sizeof(struct els_plogi_payload));
3064     memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3065     memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3066         &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3067 
3068     elsio->u.els_plogi.els_cmd = els_opcode;
3069     elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3070 
3071     if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
3072         struct fc_els_flogi *p = ptr;
3073 
3074         p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
3075     }
3076 
3077     ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3078     ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3079         (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3080         sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3081 
3082     init_completion(&elsio->u.els_plogi.comp);
3083     rval = qla2x00_start_sp(sp);
3084     if (rval != QLA_SUCCESS) {
3085         rval = QLA_FUNCTION_FAILED;
3086     } else {
3087         ql_dbg(ql_dbg_disc, vha, 0x3074,
3088             "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3089             sp->name, sp->handle, fcport->loop_id,
3090             fcport->d_id.b24, vha->d_id.b24);
3091     }
3092 
3093     if (wait) {
3094         wait_for_completion(&elsio->u.els_plogi.comp);
3095 
3096         if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3097             rval = QLA_FUNCTION_FAILED;
3098     } else {
3099         goto done;
3100     }
3101 
3102 out:
3103     fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3104     qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3105     /* ref: INIT */
3106     kref_put(&sp->cmd_kref, qla2x00_sp_release);
3107 done:
3108     return rval;
3109 }
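
/*
 * Editorial sketch, not part of the driver: the allocate-two-payloads
 * shape used above -- grab coherent DMA buffers for the ELS request and
 * response, unwinding the first if the second fails. The example_* names
 * are hypothetical.
 */
struct example_els_bufs {
    void *tx, *rx;
    dma_addr_t tx_dma, rx_dma;
};

static int example_alloc_els_bufs(struct device *dev, size_t size,
                                  struct example_els_bufs *b)
{
    b->tx = dma_alloc_coherent(dev, size, &b->tx_dma, GFP_KERNEL);
    if (!b->tx)
        return -ENOMEM;

    b->rx = dma_alloc_coherent(dev, size, &b->rx_dma, GFP_KERNEL);
    if (!b->rx) {
        dma_free_coherent(dev, size, b->tx, b->tx_dma);
        return -ENOMEM;
    }
    return 0;
}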
3110 
3111 /* It is assumed the qpair lock is held. */
3112 void qla_els_pt_iocb(struct scsi_qla_host *vha,
3113     struct els_entry_24xx *els_iocb,
3114     struct qla_els_pt_arg *a)
3115 {
3116     els_iocb->entry_type = ELS_IOCB_TYPE;
3117     els_iocb->entry_count = 1;
3118     els_iocb->sys_define = 0;
3119     els_iocb->entry_status = 0;
3120     els_iocb->handle = QLA_SKIP_HANDLE;
3121     els_iocb->nport_handle = a->nport_handle;
3122     els_iocb->rx_xchg_address = a->rx_xchg_address;
3123     els_iocb->tx_dsd_count = cpu_to_le16(1);
3124     els_iocb->vp_index = a->vp_idx;
3125     els_iocb->sof_type = EST_SOFI3;
3126     els_iocb->rx_dsd_count = cpu_to_le16(0);
3127     els_iocb->opcode = a->els_opcode;
3128 
3129     els_iocb->d_id[0] = a->did.b.al_pa;
3130     els_iocb->d_id[1] = a->did.b.area;
3131     els_iocb->d_id[2] = a->did.b.domain;
3132     /* For the S_ID the byte order differs from the D_ID */
3133     els_iocb->s_id[1] = vha->d_id.b.al_pa;
3134     els_iocb->s_id[2] = vha->d_id.b.area;
3135     els_iocb->s_id[0] = vha->d_id.b.domain;
3136 
3137     els_iocb->control_flags = cpu_to_le16(a->control_flags);
3138 
3139     els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
3140     els_iocb->tx_len = cpu_to_le32(a->tx_len);
3141     put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
3142 
3143     els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
3144     els_iocb->rx_len = cpu_to_le32(a->rx_len);
3145     put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
3146 }
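
/*
 * Editorial sketch of the byte ordering above (illustrative values, not
 * upstream code): assume a 24-bit port id 0x112233, i.e. domain = 0x11,
 * area = 0x22, al_pa = 0x33. Then:
 *
 *	DID: d_id[0] = 0x33 (al_pa), d_id[1] = 0x22 (area),
 *	     d_id[2] = 0x11 (domain)
 *	SID: s_id[0] = 0x11 (domain), s_id[1] = 0x33 (al_pa),
 *	     s_id[2] = 0x22 (area)
 */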
3147 
3148 static void
3149 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3150 {
3151     struct bsg_job *bsg_job = sp->u.bsg_job;
3152     struct fc_bsg_request *bsg_request = bsg_job->request;
3153 
3154     els_iocb->entry_type = ELS_IOCB_TYPE;
3155     els_iocb->entry_count = 1;
3156     els_iocb->sys_define = 0;
3157     els_iocb->entry_status = 0;
3158     els_iocb->handle = sp->handle;
3159     els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3160     els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3161     els_iocb->vp_index = sp->vha->vp_idx;
3162     els_iocb->sof_type = EST_SOFI3;
3163     els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3164 
3165     els_iocb->opcode =
3166         sp->type == SRB_ELS_CMD_RPT ?
3167         bsg_request->rqst_data.r_els.els_code :
3168         bsg_request->rqst_data.h_els.command_code;
3169     els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3170     els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3171     els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3172     els_iocb->control_flags = 0;
3173     els_iocb->rx_byte_count =
3174         cpu_to_le32(bsg_job->reply_payload.payload_len);
3175     els_iocb->tx_byte_count =
3176         cpu_to_le32(bsg_job->request_payload.payload_len);
3177 
3178     put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3179                &els_iocb->tx_address);
3180     els_iocb->tx_len =
3181         cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
3182 
3183     put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3184                &els_iocb->rx_address);
3185     els_iocb->rx_len =
3186         cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
3187 
3188     sp->vha->qla_stats.control_requests++;
3189 }
3190 
3191 static void
3192 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3193 {
3194     uint16_t        avail_dsds;
3195     struct dsd64    *cur_dsd;
3196     struct scatterlist *sg;
3197     int index;
3198     uint16_t tot_dsds;
3199     scsi_qla_host_t *vha = sp->vha;
3200     struct qla_hw_data *ha = vha->hw;
3201     struct bsg_job *bsg_job = sp->u.bsg_job;
3202     int entry_count = 1;
3203 
3204     memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3205     ct_iocb->entry_type = CT_IOCB_TYPE;
3206     ct_iocb->entry_status = 0;
3207     ct_iocb->handle1 = sp->handle;
3208     SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3209     ct_iocb->status = cpu_to_le16(0);
3210     ct_iocb->control_flags = cpu_to_le16(0);
3211     ct_iocb->timeout = 0;
3212     ct_iocb->cmd_dsd_count =
3213         cpu_to_le16(bsg_job->request_payload.sg_cnt);
3214     ct_iocb->total_dsd_count =
3215         cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3216     ct_iocb->req_bytecount =
3217         cpu_to_le32(bsg_job->request_payload.payload_len);
3218     ct_iocb->rsp_bytecount =
3219         cpu_to_le32(bsg_job->reply_payload.payload_len);
3220 
3221     put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3222                &ct_iocb->req_dsd.address);
3223     ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3224 
3225     put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3226                &ct_iocb->rsp_dsd.address);
3227     ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3228 
3229     avail_dsds = 1;
3230     cur_dsd = &ct_iocb->rsp_dsd;
3231     index = 0;
3232     tot_dsds = bsg_job->reply_payload.sg_cnt;
3233 
3234     for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3235         cont_a64_entry_t *cont_pkt;
3236 
3237         /* Allocate additional continuation packets? */
3238         if (avail_dsds == 0) {
3239             /*
3240              * Five DSDs are available in the Cont.
3241              * Type 1 IOCB.
3242              */
3243             cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3244                 vha->hw->req_q_map[0]);
3245             cur_dsd = cont_pkt->dsd;
3246             avail_dsds = 5;
3247             entry_count++;
3248         }
3249 
3250         append_dsd64(&cur_dsd, sg);
3251         avail_dsds--;
3252     }
3253     ct_iocb->entry_count = entry_count;
3254 
3255     sp->vha->qla_stats.control_requests++;
3256 }
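
/*
 * Worked example for the continuation math above (editorial, with assumed
 * values): if bsg_job->reply_payload.sg_cnt == 8, the first DSD lands in
 * the embedded rsp_dsd (avail_dsds == 1) and the remaining 7 need
 * DIV_ROUND_UP(7, 5) == 2 Continuation Type 1 IOCBs, so entry_count
 * becomes 3.
 */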
3257 
3258 static void
3259 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3260 {
3261     uint16_t        avail_dsds;
3262     struct dsd64    *cur_dsd;
3263     struct scatterlist *sg;
3264     int index;
3265     uint16_t cmd_dsds, rsp_dsds;
3266     scsi_qla_host_t *vha = sp->vha;
3267     struct qla_hw_data *ha = vha->hw;
3268     struct bsg_job *bsg_job = sp->u.bsg_job;
3269     int entry_count = 1;
3270     cont_a64_entry_t *cont_pkt = NULL;
3271 
3272     ct_iocb->entry_type = CT_IOCB_TYPE;
3273     ct_iocb->entry_status = 0;
3274     ct_iocb->sys_define = 0;
3275     ct_iocb->handle = sp->handle;
3276 
3277     ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3278     ct_iocb->vp_index = sp->vha->vp_idx;
3279     ct_iocb->comp_status = cpu_to_le16(0);
3280 
3281     cmd_dsds = bsg_job->request_payload.sg_cnt;
3282     rsp_dsds = bsg_job->reply_payload.sg_cnt;
3283 
3284     ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3285     ct_iocb->timeout = 0;
3286     ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3287     ct_iocb->cmd_byte_count =
3288         cpu_to_le32(bsg_job->request_payload.payload_len);
3289 
3290     avail_dsds = 2;
3291     cur_dsd = ct_iocb->dsd;
3292     index = 0;
3293 
3294     for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3295         /* Allocate additional continuation packets? */
3296         if (avail_dsds == 0) {
3297             /*
3298              * Five DSDs are available in the Cont.
3299              * Type 1 IOCB.
3300              */
3301             cont_pkt = qla2x00_prep_cont_type1_iocb(
3302                 vha, ha->req_q_map[0]);
3303             cur_dsd = cont_pkt->dsd;
3304             avail_dsds = 5;
3305             entry_count++;
3306         }
3307 
3308         append_dsd64(&cur_dsd, sg);
3309         avail_dsds--;
3310     }
3311 
3312     index = 0;
3313 
3314     for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3315         /* Allocate additional continuation packets? */
3316         if (avail_dsds == 0) {
3317             /*
3318              * Five DSDs are available in the Cont.
3319              * Type 1 IOCB.
3320              */
3321             cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3322                 ha->req_q_map[0]);
3323             cur_dsd = cont_pkt->dsd;
3324             avail_dsds = 5;
3325             entry_count++;
3326         }
3327 
3328         append_dsd64(&cur_dsd, sg);
3329         avail_dsds--;
3330     }
3331     ct_iocb->entry_count = entry_count;
3332 }
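
/*
 * Editorial example for the shared DSD budget above (assumed values): with
 * cmd_dsds == 3 and rsp_dsds == 4, the first two DSDs fill the embedded
 * ct_iocb->dsd[] (avail_dsds == 2), and the remaining five fit in a single
 * Continuation Type 1 IOCB, giving entry_count == 2.
 */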
3333 
3334 /**
3335  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3336  * @sp: command to send to the ISP
3337  *
3338  * Returns non-zero if a failure occurred, else zero.
3339  */
3340 int
3341 qla82xx_start_scsi(srb_t *sp)
3342 {
3343     int     nseg;
3344     unsigned long   flags;
3345     struct scsi_cmnd *cmd;
3346     uint32_t    *clr_ptr;
3347     uint32_t    handle;
3348     uint16_t    cnt;
3349     uint16_t    req_cnt;
3350     uint16_t    tot_dsds;
3351     struct device_reg_82xx __iomem *reg;
3352     uint32_t dbval;
3353     __be32 *fcp_dl;
3354     uint8_t additional_cdb_len;
3355     struct ct6_dsd *ctx;
3356     struct scsi_qla_host *vha = sp->vha;
3357     struct qla_hw_data *ha = vha->hw;
3358     struct req_que *req = NULL;
3359     struct rsp_que *rsp = NULL;
3360 
3361     /* Setup device pointers. */
3362     reg = &ha->iobase->isp82;
3363     cmd = GET_CMD_SP(sp);
3364     req = vha->req;
3365     rsp = ha->rsp_q_map[0];
3366 
3367     /* So we know we haven't pci_map'ed anything yet */
3368     tot_dsds = 0;
3369 
3370     dbval = 0x04 | (ha->portnum << 5);
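    /*
     * Editorial reading of the doorbell layout (inferred from this file,
     * not from firmware documentation): bits 4:0 carry the command (0x04)
     * and bits 7:5 the port number; the write near the end of this
     * function ORs in the request queue id at bits 15:8 and the ring
     * index at bits 31:16.
     */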
3371 
3372     /* Send marker if required */
3373     if (vha->marker_needed != 0) {
3374         if (qla2x00_marker(vha, ha->base_qpair,
3375             0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3376             ql_log(ql_log_warn, vha, 0x300c,
3377                 "qla2x00_marker failed for cmd=%p.\n", cmd);
3378             return QLA_FUNCTION_FAILED;
3379         }
3380         vha->marker_needed = 0;
3381     }
3382 
3383     /* Acquire ring specific lock */
3384     spin_lock_irqsave(&ha->hardware_lock, flags);
3385 
3386     handle = qla2xxx_get_next_handle(req);
3387     if (handle == 0)
3388         goto queuing_error;
3389 
3390     /* Map the sg table so we have an accurate count of sg entries needed */
3391     if (scsi_sg_count(cmd)) {
3392         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3393             scsi_sg_count(cmd), cmd->sc_data_direction);
3394         if (unlikely(!nseg))
3395             goto queuing_error;
3396     } else
3397         nseg = 0;
3398 
3399     tot_dsds = nseg;
3400 
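    /*
     * Editorial note: commands whose DSD count exceeds the
     * ql2xshiftctondsd module parameter take the Command Type 6 path
     * below, with externally chained DSD lists; smaller commands use
     * the inline Command Type 7 path in the else branch.
     */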
3401     if (tot_dsds > ql2xshiftctondsd) {
3402         struct cmd_type_6 *cmd_pkt;
3403         uint16_t more_dsd_lists = 0;
3404         struct dsd_dma *dsd_ptr;
3405         uint16_t i;
3406 
3407         more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3408         if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3409             ql_dbg(ql_dbg_io, vha, 0x300d,
3410                 "Num of DSD list %d is than %d for cmd=%p.\n",
3411                 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3412                 cmd);
3413             goto queuing_error;
3414         }
3415 
3416         if (more_dsd_lists <= ha->gbl_dsd_avail)
3417             goto sufficient_dsds;
3418         else
3419             more_dsd_lists -= ha->gbl_dsd_avail;
3420 
3421         for (i = 0; i < more_dsd_lists; i++) {
3422             dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3423             if (!dsd_ptr) {
3424                 ql_log(ql_log_fatal, vha, 0x300e,
3425                     "Failed to allocate memory for dsd_dma "
3426                     "for cmd=%p.\n", cmd);
3427                 goto queuing_error;
3428             }
3429 
3430             dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3431                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3432             if (!dsd_ptr->dsd_addr) {
3433                 kfree(dsd_ptr);
3434                 ql_log(ql_log_fatal, vha, 0x300f,
3435                     "Failed to allocate memory for dsd_addr "
3436                     "for cmd=%p.\n", cmd);
3437                 goto queuing_error;
3438             }
3439             list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3440             ha->gbl_dsd_avail++;
3441         }
3442 
3443 sufficient_dsds:
3444         req_cnt = 1;
3445 
3446         if (req->cnt < (req_cnt + 2)) {
3447             cnt = (uint16_t)rd_reg_dword_relaxed(
3448                 &reg->req_q_out[0]);
3449             if (req->ring_index < cnt)
3450                 req->cnt = cnt - req->ring_index;
3451             else
3452                 req->cnt = req->length -
3453                     (req->ring_index - cnt);
3454             if (req->cnt < (req_cnt + 2))
3455                 goto queuing_error;
3456         }
3457 
3458         ctx = sp->u.scmd.ct6_ctx =
3459             mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3460         if (!ctx) {
3461             ql_log(ql_log_fatal, vha, 0x3010,
3462                 "Failed to allocate ctx for cmd=%p.\n", cmd);
3463             goto queuing_error;
3464         }
3465 
3466         memset(ctx, 0, sizeof(struct ct6_dsd));
3467         ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3468             GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3469         if (!ctx->fcp_cmnd) {
3470             ql_log(ql_log_fatal, vha, 0x3011,
3471                 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3472             goto queuing_error;
3473         }
3474 
3475         /* Initialize the DSD list and dma handle */
3476         INIT_LIST_HEAD(&ctx->dsd_list);
3477         ctx->dsd_use_cnt = 0;
3478 
3479         if (cmd->cmd_len > 16) {
3480             additional_cdb_len = cmd->cmd_len - 16;
3481             if ((cmd->cmd_len % 4) != 0) {
3482                 /* A SCSI command bigger than 16 bytes must be
3483                  * a multiple of 4
3484                  */
3485                 ql_log(ql_log_warn, vha, 0x3012,
3486                     "scsi cmd len %d not multiple of 4 "
3487                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3488                 goto queuing_error_fcp_cmnd;
3489             }
3490             ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3491         } else {
3492             additional_cdb_len = 0;
3493             ctx->fcp_cmnd_len = 12 + 16 + 4;
3494         }
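        /*
         * Editorial arithmetic check (assumed values): the 12 bytes cover
         * the 8-byte LUN plus the four control bytes that precede the CDB
         * in the FCP_CMND IU, and the trailing 4 bytes are the FCP_DL
         * field filled in below. For a standard 16-byte CDB this gives
         * fcp_cmnd_len = 12 + 16 + 4 = 32; for a 32-byte CDB,
         * additional_cdb_len = 16 and fcp_cmnd_len = 12 + 32 + 4 = 48.
         */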
3495 
3496         cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3497         cmd_pkt->handle = make_handle(req->id, handle);
3498 
3499         /* Zero out remaining portion of packet. */
3500         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3501         clr_ptr = (uint32_t *)cmd_pkt + 2;
3502         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3503         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3504 
3505         /* Set NPORT-ID and LUN number*/
3506         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3507         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3508         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3509         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3510         cmd_pkt->vp_index = sp->vha->vp_idx;
3511 
3512         /* Build IOCB segments */
3513         if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3514             goto queuing_error_fcp_cmnd;
3515 
3516         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3517         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3518 
3519         /* build FCP_CMND IU */
3520         int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3521         ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3522 
3523         if (cmd->sc_data_direction == DMA_TO_DEVICE)
3524             ctx->fcp_cmnd->additional_cdb_len |= 1;
3525         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3526             ctx->fcp_cmnd->additional_cdb_len |= 2;
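        /*
         * Editorial note: per the FCP_CMND layout, bits 1:0 of this byte
         * are the WRDATA/RDDATA direction flags and bits 7:2 hold the
         * additional CDB length in 4-byte words. Because the byte count
         * stored above is a multiple of 4, it already equals the word
         * count shifted into bits 7:2, so ORing the flags here is safe.
         */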
3527 
3528         /* Populate the FCP_PRIO. */
3529         if (ha->flags.fcp_prio_enabled)
3530             ctx->fcp_cmnd->task_attribute |=
3531                 sp->fcport->fcp_prio << 3;
3532 
3533         memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3534 
3535         fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3536             additional_cdb_len);
3537         *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3538 
3539         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3540         put_unaligned_le64(ctx->fcp_cmnd_dma,
3541                    &cmd_pkt->fcp_cmnd_dseg_address);
3542 
3543         sp->flags |= SRB_FCP_CMND_DMA_VALID;
3544         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3545         /* Set total data segment count. */
3546         cmd_pkt->entry_count = (uint8_t)req_cnt;
3547         /* Specify response queue number where
3548          * completion should happen
3549          */
3550         cmd_pkt->entry_status = (uint8_t) rsp->id;
3551     } else {
3552         struct cmd_type_7 *cmd_pkt;
3553 
3554         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3555         if (req->cnt < (req_cnt + 2)) {
3556             cnt = (uint16_t)rd_reg_dword_relaxed(
3557                 &reg->req_q_out[0]);
3558             if (req->ring_index < cnt)
3559                 req->cnt = cnt - req->ring_index;
3560             else
3561                 req->cnt = req->length -
3562                     (req->ring_index - cnt);
3563         }
3564         if (req->cnt < (req_cnt + 2))
3565             goto queuing_error;
3566 
3567         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3568         cmd_pkt->handle = make_handle(req->id, handle);
3569 
3570         /* Zero out remaining portion of packet. */
3571         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3572         clr_ptr = (uint32_t *)cmd_pkt + 2;
3573         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3574         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3575 
3576         /* Set NPORT-ID and LUN number*/
3577         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3578         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3579         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3580         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3581         cmd_pkt->vp_index = sp->vha->vp_idx;
3582 
3583         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3584         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3585             sizeof(cmd_pkt->lun));
3586 
3587         /* Populate the FCP_PRIO. */
3588         if (ha->flags.fcp_prio_enabled)
3589             cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3590 
3591         /* Load SCSI command packet. */
3592         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3593         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3594 
3595         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3596 
3597         /* Build IOCB segments */
3598         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3599 
3600         /* Set total data segment count. */
3601         cmd_pkt->entry_count = (uint8_t)req_cnt;
3602         /* Specify response queue number where
3603          * completion should happen.
3604          */
3605         cmd_pkt->entry_status = (uint8_t) rsp->id;
3606 
3607     }
3608     /* Build command packet. */
3609     req->current_outstanding_cmd = handle;
3610     req->outstanding_cmds[handle] = sp;
3611     sp->handle = handle;
3612     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3613     req->cnt -= req_cnt;
3614     wmb();
3615 
3616     /* Adjust ring index. */
3617     req->ring_index++;
3618     if (req->ring_index == req->length) {
3619         req->ring_index = 0;
3620         req->ring_ptr = req->ring;
3621     } else
3622         req->ring_ptr++;
3623 
3624     sp->flags |= SRB_DMA_VALID;
3625 
3626     /* Set chip new ring index. */
3627     /* write, read and verify logic */
3628     dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3629     if (ql2xdbwr)
3630         qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3631     else {
3632         wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3633         wmb();
3634         while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3635             wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3636             wmb();
3637         }
3638     }
3639 
3640     /* Manage unprocessed RIO/ZIO commands in response queue. */
3641     if (vha->flags.process_response_queue &&
3642         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3643         qla24xx_process_response_queue(vha, rsp);
3644 
3645     spin_unlock_irqrestore(&ha->hardware_lock, flags);
3646     return QLA_SUCCESS;
3647 
3648 queuing_error_fcp_cmnd:
3649     dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3650 queuing_error:
3651     if (tot_dsds)
3652         scsi_dma_unmap(cmd);
3653 
3654     if (sp->u.scmd.crc_ctx) {
3655         mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3656         sp->u.scmd.crc_ctx = NULL;
3657     }
3658     spin_unlock_irqrestore(&ha->hardware_lock, flags);
3659 
3660     return QLA_FUNCTION_FAILED;
3661 }
3662 
3663 static void
3664 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3665 {
3666     struct srb_iocb *aio = &sp->u.iocb_cmd;
3667     scsi_qla_host_t *vha = sp->vha;
3668     struct req_que *req = sp->qpair->req;
3669     srb_t *orig_sp = sp->cmd_sp;
3670 
3671     memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3672     abt_iocb->entry_type = ABORT_IOCB_TYPE;
3673     abt_iocb->entry_count = 1;
3674     abt_iocb->handle = make_handle(req->id, sp->handle);
3675     if (sp->fcport) {
3676         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3677         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3678         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3679         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3680     }
3681     abt_iocb->handle_to_abort =
3682         make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3683                 aio->u.abt.cmd_hndl);
3684     abt_iocb->vp_index = vha->vp_idx;
3685     abt_iocb->req_que_no = aio->u.abt.req_que_no;
3686 
3687     /* need to pass original sp */
3688     if (orig_sp)
3689         qla_nvme_abort_set_option(abt_iocb, orig_sp);
3690 
3691     /* Send the command to the firmware */
3692     wmb();
3693 }
3694 
3695 static void
3696 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3697 {
3698     int i, sz;
3699 
3700     mbx->entry_type = MBX_IOCB_TYPE;
3701     mbx->handle = sp->handle;
3702     sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3703 
3704     for (i = 0; i < sz; i++)
3705         mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3706 }
3707 
3708 static void
3709 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3710 {
3711     sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3712     qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3713     ct_pkt->handle = sp->handle;
3714 }
3715 
3716 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3717     struct nack_to_isp *nack)
3718 {
3719     struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3720 
3721     nack->entry_type = NOTIFY_ACK_TYPE;
3722     nack->entry_count = 1;
3723     nack->ox_id = ntfy->ox_id;
3724 
3725     nack->u.isp24.handle = sp->handle;
3726     nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3727     if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3728         nack->u.isp24.flags = ntfy->u.isp24.flags &
3729             cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3730     }
3731     nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3732     nack->u.isp24.status = ntfy->u.isp24.status;
3733     nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3734     nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3735     nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3736     nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3737     nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3738     nack->u.isp24.srr_flags = 0;
3739     nack->u.isp24.srr_reject_code = 0;
3740     nack->u.isp24.srr_reject_code_expl = 0;
3741     nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3742 
3743     if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
3744         (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
3745         sp->vha->hw->flags.edif_enabled) {
3746         ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
3747             "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
3748             sp->name, sp->handle, sp->fcport->loop_id,
3749             sp->fcport->d_id.b24);
3750         nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
3751     }
3752 }
3753 
3754 /*
3755  * Build NVME LS request
3756  */
3757 static void
3758 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3759 {
3760     struct srb_iocb *nvme;
3761 
3762     nvme = &sp->u.iocb_cmd;
3763     cmd_pkt->entry_type = PT_LS4_REQUEST;
3764     cmd_pkt->entry_count = 1;
3765     cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3766 
3767     cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3768     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3769     cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3770 
3771     cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3772     cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3773     cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3774     put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3775 
3776     cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3777     cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3778     cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3779     put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3780 }
3781 
3782 static void
3783 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3784 {
3785     int map, pos;
3786 
3787     vce->entry_type = VP_CTRL_IOCB_TYPE;
3788     vce->handle = sp->handle;
3789     vce->entry_count = 1;
3790     vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3791     vce->vp_count = cpu_to_le16(1);
3792 
3793     /*
3794      * The index map in the firmware starts with 1; decrement the index.
3795      * This is OK as we never use index 0.
3796      */
3797     map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3798     pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3799     vce->vp_idx_map[map] |= 1 << pos;
3800 }
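
/*
 * Editorial example for the bitmap math above (assumed vp_index == 10):
 * map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1, so the IOCB sets
 * vce->vp_idx_map[1] |= 0x02.
 */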
3801 
3802 static void
3803 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3804 {
3805     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3806     logio->control_flags =
3807         cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3808 
3809     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3810     logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3811     logio->port_id[1] = sp->fcport->d_id.b.area;
3812     logio->port_id[2] = sp->fcport->d_id.b.domain;
3813     logio->vp_index = sp->fcport->vha->vp_idx;
3814 }
3815 
3816 int
3817 qla2x00_start_sp(srb_t *sp)
3818 {
3819     int rval = QLA_SUCCESS;
3820     scsi_qla_host_t *vha = sp->vha;
3821     struct qla_hw_data *ha = vha->hw;
3822     struct qla_qpair *qp = sp->qpair;
3823     void *pkt;
3824     unsigned long flags;
3825 
3826     if (vha->hw->flags.eeh_busy)
3827         return -EIO;
3828 
3829     spin_lock_irqsave(qp->qp_lock_ptr, flags);
3830     pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3831     if (!pkt) {
3832         rval = EAGAIN;
3833         ql_log(ql_log_warn, vha, 0x700c,
3834             "qla2x00_alloc_iocbs failed.\n");
3835         goto done;
3836     }
3837 
3838     switch (sp->type) {
3839     case SRB_LOGIN_CMD:
3840         IS_FWI2_CAPABLE(ha) ?
3841             qla24xx_login_iocb(sp, pkt) :
3842             qla2x00_login_iocb(sp, pkt);
3843         break;
3844     case SRB_PRLI_CMD:
3845         qla24xx_prli_iocb(sp, pkt);
3846         break;
3847     case SRB_LOGOUT_CMD:
3848         IS_FWI2_CAPABLE(ha) ?
3849             qla24xx_logout_iocb(sp, pkt) :
3850             qla2x00_logout_iocb(sp, pkt);
3851         break;
3852     case SRB_ELS_CMD_RPT:
3853     case SRB_ELS_CMD_HST:
3854         qla24xx_els_iocb(sp, pkt);
3855         break;
3856     case SRB_ELS_CMD_HST_NOLOGIN:
3857         qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
3858         ((struct els_entry_24xx *)pkt)->handle = sp->handle;
3859         break;
3860     case SRB_CT_CMD:
3861         IS_FWI2_CAPABLE(ha) ?
3862             qla24xx_ct_iocb(sp, pkt) :
3863             qla2x00_ct_iocb(sp, pkt);
3864         break;
3865     case SRB_ADISC_CMD:
3866         IS_FWI2_CAPABLE(ha) ?
3867             qla24xx_adisc_iocb(sp, pkt) :
3868             qla2x00_adisc_iocb(sp, pkt);
3869         break;
3870     case SRB_TM_CMD:
3871         IS_QLAFX00(ha) ?
3872             qlafx00_tm_iocb(sp, pkt) :
3873             qla24xx_tm_iocb(sp, pkt);
3874         break;
3875     case SRB_FXIOCB_DCMD:
3876     case SRB_FXIOCB_BCMD:
3877         qlafx00_fxdisc_iocb(sp, pkt);
3878         break;
3879     case SRB_NVME_LS:
3880         qla_nvme_ls(sp, pkt);
3881         break;
3882     case SRB_ABT_CMD:
3883         IS_QLAFX00(ha) ?
3884             qlafx00_abort_iocb(sp, pkt) :
3885             qla24xx_abort_iocb(sp, pkt);
3886         break;
3887     case SRB_ELS_DCMD:
3888         qla24xx_els_logo_iocb(sp, pkt);
3889         break;
3890     case SRB_CT_PTHRU_CMD:
3891         qla2x00_ctpthru_cmd_iocb(sp, pkt);
3892         break;
3893     case SRB_MB_IOCB:
3894         qla2x00_mb_iocb(sp, pkt);
3895         break;
3896     case SRB_NACK_PLOGI:
3897     case SRB_NACK_PRLI:
3898     case SRB_NACK_LOGO:
3899         qla2x00_send_notify_ack_iocb(sp, pkt);
3900         break;
3901     case SRB_CTRL_VP:
3902         qla25xx_ctrlvp_iocb(sp, pkt);
3903         break;
3904     case SRB_PRLO_CMD:
3905         qla24xx_prlo_iocb(sp, pkt);
3906         break;
3907     case SRB_SA_UPDATE:
3908         qla24xx_sa_update_iocb(sp, pkt);
3909         break;
3910     case SRB_SA_REPLACE:
3911         qla24xx_sa_replace_iocb(sp, pkt);
3912         break;
3913     default:
3914         break;
3915     }
3916 
3917     if (sp->start_timer) {
3918         /* ref: TMR timer ref
3919          * This code should sit just before the start_iocbs call.
3920          * It ensures the caller does not need to do a kref_put
3921          * even on failure.
3922          */
3923         kref_get(&sp->cmd_kref);
3924         add_timer(&sp->u.iocb_cmd.timer);
3925     }
3926 
3927     wmb();
3928     qla2x00_start_iocbs(vha, qp->req);
3929 done:
3930     spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3931     return rval;
3932 }
3933 
3934 static void
3935 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3936                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3937 {
3938     uint16_t avail_dsds;
3939     struct dsd64 *cur_dsd;
3940     uint32_t req_data_len = 0;
3941     uint32_t rsp_data_len = 0;
3942     struct scatterlist *sg;
3943     int index;
3944     int entry_count = 1;
3945     struct bsg_job *bsg_job = sp->u.bsg_job;
3946 
3947     /* Update entry type to indicate bidir command */
3948     put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3949 
3950     /* Set the transfer direction; in this case set both flags.
3951      * Also set the BD_WRAP_BACK flag; the firmware will take care
3952      * of assigning DID=SID for outgoing pkts.
3953      */
3954     cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3955     cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3956     cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3957                             BD_WRAP_BACK);
3958 
3959     req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3960     cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3961     cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3962     cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3963 
3964     vha->bidi_stats.transfer_bytes += req_data_len;
3965     vha->bidi_stats.io_count++;
3966 
3967     vha->qla_stats.output_bytes += req_data_len;
3968     vha->qla_stats.output_requests++;
3969 
3970     /* Only one DSD is available in the bidirectional IOCB; remaining
3971      * DSDs are bundled in continuation IOCBs.
3972      */
3973     avail_dsds = 1;
3974     cur_dsd = &cmd_pkt->fcp_dsd;
3975 
3976     index = 0;
3977 
3978     for_each_sg(bsg_job->request_payload.sg_list, sg,
3979                 bsg_job->request_payload.sg_cnt, index) {
3980         cont_a64_entry_t *cont_pkt;
3981 
3982         /* Allocate additional continuation packets */
3983         if (avail_dsds == 0) {
3984             /* A Continuation Type 1 IOCB can accommodate
3985              * 5 DSDs
3986              */
3987             cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3988             cur_dsd = cont_pkt->dsd;
3989             avail_dsds = 5;
3990             entry_count++;
3991         }
3992         append_dsd64(&cur_dsd, sg);
3993         avail_dsds--;
3994     }
3995     /* For a read request the DSDs always go to a continuation IOCB,
3996      * following the write DSDs. If there is room on the current IOCB
3997      * then they are added to that IOCB, else a new continuation IOCB
3998      * is allocated.
3999      */
4000     for_each_sg(bsg_job->reply_payload.sg_list, sg,
4001                 bsg_job->reply_payload.sg_cnt, index) {
4002         cont_a64_entry_t *cont_pkt;
4003 
4004         /* Allocate additional continuation packets */
4005         if (avail_dsds == 0) {
4006             /* A Continuation Type 1 IOCB can accommodate
4007              * 5 DSDs
4008              */
4009             cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4010             cur_dsd = cont_pkt->dsd;
4011             avail_dsds = 5;
4012             entry_count++;
4013         }
4014         append_dsd64(&cur_dsd, sg);
4015         avail_dsds--;
4016     }
4017     /* This value should be the same as the number of IOCBs required for this cmd */
4018     cmd_pkt->entry_count = entry_count;
4019 }
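
/*
 * Editorial note: both byte counts above are taken from the request
 * payload length, which is consistent with the BD_WRAP_BACK loopback
 * mode where the outgoing data is echoed back (an inference from the
 * code, not a spec statement).
 */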
4020 
4021 int
4022 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
4023 {
4024 
4025     struct qla_hw_data *ha = vha->hw;
4026     unsigned long flags;
4027     uint32_t handle;
4028     uint16_t req_cnt;
4029     uint16_t cnt;
4030     uint32_t *clr_ptr;
4031     struct cmd_bidir *cmd_pkt = NULL;
4032     struct rsp_que *rsp;
4033     struct req_que *req;
4034     int rval = EXT_STATUS_OK;
4037 
4038     rsp = ha->rsp_q_map[0];
4039     req = vha->req;
4040 
4041     /* Send marker if required */
4042     if (vha->marker_needed != 0) {
4043         if (qla2x00_marker(vha, ha->base_qpair,
4044             0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
4045             return EXT_STATUS_MAILBOX;
4046         vha->marker_needed = 0;
4047     }
4048 
4049     /* Acquire ring specific lock */
4050     spin_lock_irqsave(&ha->hardware_lock, flags);
4051 
4052     handle = qla2xxx_get_next_handle(req);
4053     if (handle == 0) {
4054         rval = EXT_STATUS_BUSY;
4055         goto queuing_error;
4056     }
4057 
4058     /* Calculate number of IOCB required */
4059     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4060 
4061     /* Check for room on request queue. */
4062     if (req->cnt < req_cnt + 2) {
4063         if (IS_SHADOW_REG_CAPABLE(ha)) {
4064             cnt = *req->out_ptr;
4065         } else {
4066             cnt = rd_reg_dword_relaxed(req->req_q_out);
4067             if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4068                 goto queuing_error;
4069         }
4070 
4071         if (req->ring_index < cnt)
4072             req->cnt = cnt - req->ring_index;
4073         else
4074             req->cnt = req->length -
4075                 (req->ring_index - cnt);
4076     }
4077     if (req->cnt < req_cnt + 2) {
4078         rval = EXT_STATUS_BUSY;
4079         goto queuing_error;
4080     }
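    /*
     * Editorial example of the free-slot math (assumed values): with
     * req->length == 2048, req->ring_index == 100 and a firmware out
     * pointer cnt == 90, there are 10 entries in flight, so req->cnt
     * becomes 2048 - (100 - 90) = 2038. The "req_cnt + 2" test keeps a
     * small cushion so the ring is never driven completely full.
     */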
4081 
4082     cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4083     cmd_pkt->handle = make_handle(req->id, handle);
4084 
4085     /* Zero out remaining portion of packet. */
4086     /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
4087     clr_ptr = (uint32_t *)cmd_pkt + 2;
4088     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4089 
4090     /* Set NPORT-ID (of vha) */
4091     cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4092     cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4093     cmd_pkt->port_id[1] = vha->d_id.b.area;
4094     cmd_pkt->port_id[2] = vha->d_id.b.domain;
4095 
4096     qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4097     cmd_pkt->entry_status = (uint8_t) rsp->id;
4098     /* Build command packet. */
4099     req->current_outstanding_cmd = handle;
4100     req->outstanding_cmds[handle] = sp;
4101     sp->handle = handle;
4102     req->cnt -= req_cnt;
4103 
4104     /* Send the command to the firmware */
4105     wmb();
4106     qla2x00_start_iocbs(vha, req);
4107 queuing_error:
4108     spin_unlock_irqrestore(&ha->hardware_lock, flags);
4109 
4110     return rval;
4111 }