// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

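/*
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 * @vha: NVMe-enabled host
 * @fcport: fabric port carrying NVMe PRLI service parameters
 *
 * Registers the local port first if that has not happened yet, then hands
 * the remote port to nvme_fc_register_remoteport(). Returns 0 when the
 * registration is skipped (NVMe not enabled, port is neither an NVMe target
 * nor a discovery controller, or it is already registered) and the
 * transport's error code on failure.
 */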
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
    struct qla_nvme_rport *rport;
    struct nvme_fc_port_info req;
    int ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return 0;

    if (!vha->flags.nvme_enabled) {
        ql_log(ql_log_info, vha, 0x2100,
            "%s: Not registering target since Host NVME is not enabled\n",
            __func__);
        return 0;
    }

    if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
        return 0;

    if (!(fcport->nvme_prli_service_param &
        (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
        (fcport->nvme_flag & NVME_FLAG_REGISTERED))
        return 0;

    fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

    memset(&req, 0, sizeof(struct nvme_fc_port_info));
    req.port_name = wwn_to_u64(fcport->port_name);
    req.node_name = wwn_to_u64(fcport->node_name);
    req.port_role = 0;
    req.dev_loss_tmo = fcport->dev_loss_tmo;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
        req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
        req.port_role |= FC_PORT_ROLE_NVME_TARGET;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
        req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

    req.port_id = fcport->d_id.b24;

    ql_log(ql_log_info, vha, 0x2102,
        "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
        __func__, req.node_name, req.port_name,
        req.port_id);

    ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
        &fcport->nvme_remote_port);
    if (ret) {
        ql_log(ql_log_warn, vha, 0x212e,
            "Failed to register remote port. Transport returned %d\n",
            ret);
        return ret;
    }

    nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
                       fcport->dev_loss_tmo);

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
        ql_log(ql_log_info, vha, 0x212a,
               "PortID:%06x Supports SLER\n", req.port_id);

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
        ql_log(ql_log_info, vha, 0x212b,
               "PortID:%06x Supports PI control\n", req.port_id);

    rport = fcport->nvme_remote_port->private;
    rport->fcport = fcport;

    fcport->nvme_flag |= NVME_FLAG_REGISTERED;
    return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha;
    struct qla_qpair *qpair;

    /* Map admin queue and 1st IO queue to index 0 */
    if (qidx)
        qidx--;

    vha = (struct scsi_qla_host *)lport->private;
    ha = vha->hw;

    ql_log(ql_log_info, vha, 0x2104,
        "%s: handle %p, idx =%d, qsize %d\n",
        __func__, handle, qidx, qsize);

    if (qidx > qla_nvme_fc_transport.max_hw_queues) {
        ql_log(ql_log_warn, vha, 0x212f,
            "%s: Illegal qidx=%d. Max=%d\n",
            __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
        return -EINVAL;
    }

    /* Use base qpair if max_qpairs is 0 */
    if (!ha->max_qpairs) {
        qpair = ha->base_qpair;
    } else {
        if (ha->queue_pair_map[qidx]) {
            *handle = ha->queue_pair_map[qidx];
            ql_log(ql_log_info, vha, 0x2121,
                   "Returning existing qpair of %p for idx=%x\n",
                   *handle, qidx);
            return 0;
        }

        qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
        if (!qpair) {
            ql_log(ql_log_warn, vha, 0x2122,
                   "Failed to allocate qpair\n");
            return -EINVAL;
        }
    }
    *handle = qpair;

    return 0;
}

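/*
 * Final kref release for an FCP command: report the completion status
 * back to the FC-NVMe transport via fd->done() and return the srb to
 * its qpair.
 */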
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
    struct srb *sp = container_of(kref, struct srb, cmd_kref);
    struct nvme_private *priv = (struct nvme_private *)sp->priv;
    struct nvmefc_fcp_req *fd;
    struct srb_iocb *nvme;
    unsigned long flags;

    if (!priv)
        goto out;

    nvme = &sp->u.iocb_cmd;
    fd = nvme->u.nvme.desc;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    priv->sp = NULL;
    sp->priv = NULL;
    if (priv->comp_status == QLA_SUCCESS) {
        fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
        fd->status = NVME_SC_SUCCESS;
    } else {
        fd->rcv_rsplen = 0;
        fd->transferred_length = 0;
        fd->status = NVME_SC_INTERNAL;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    fd->done(fd);
out:
    qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

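/* Undo the DMA mapping of an LS request buffer, if it is still mapped. */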
static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
    if (sp->flags & SRB_DMA_VALID) {
        struct srb_iocb *nvme = &sp->u.iocb_cmd;
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
                 fd->rqstlen, DMA_TO_DEVICE);
        sp->flags &= ~SRB_DMA_VALID;
    }
}

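/*
 * Final kref release for an LS request: detach the srb from its
 * nvme_private, unmap the request buffer and complete the LS upcall.
 */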
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
    struct srb *sp = container_of(kref, struct srb, cmd_kref);
    struct nvme_private *priv = (struct nvme_private *)sp->priv;
    struct nvmefc_ls_req *fd;
    unsigned long flags;

    if (!priv)
        goto out;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    priv->sp = NULL;
    sp->priv = NULL;
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    fd = priv->fd;

    qla_nvme_ls_unmap(sp, fd);
    fd->done(fd, priv->comp_status);
out:
    qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, ls_work);

    kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

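/* LS completion callback; defer the final kref_put to process context. */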
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
    struct nvme_private *priv = sp->priv;

    if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
        return;

    if (res)
        res = -EINVAL;

    priv->comp_status = res;
    INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
    schedule_work(&priv->ls_work);
}

/* Assumes the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
    struct nvme_private *priv = sp->priv;

    priv->comp_status = res;
    kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

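/*
 * Worker that issues the ABTS for an aborted NVMe request. It is
 * scheduled from the transport abort entry points with an extra kref
 * held on the srb; that reference is dropped here unless the abort
 * path waits for the ABTS completion to drop it.
 */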
static void qla_nvme_abort_work(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, abort_work);
    srb_t *sp = priv->sp;
    fc_port_t *fcport = sp->fcport;
    struct qla_hw_data *ha = fcport->vha->hw;
    int rval, abts_done_called = 1;
    bool io_wait_for_abort_done;
    uint32_t handle;

    ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
           "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
           __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);

    if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
        goto out;

    if (ha->flags.host_shutting_down) {
        ql_log(ql_log_info, sp->fcport->vha, 0xffff,
            "%s Calling done on sp: %p, type: 0x%x\n",
            __func__, sp, sp->type);
        sp->done(sp, 0);
        goto out;
    }

    /*
     * sp may not be valid after abort_command() if the return code is
     * either QLA_SUCCESS or QLA_ERR_FROM_FW, so cache the values here.
     */
    io_wait_for_abort_done = ql2xabts_wait_nvme &&
                    QLA_ABTS_WAIT_ENABLED(sp);
    handle = sp->handle;

    rval = ha->isp_ops->abort_command(sp);

    ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
        "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
        __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
        sp, handle, fcport, rval);

    /*
     * If async tmf is enabled, the abort callback is called only on
     * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
     */
    if (ql2xasynctmfenable &&
        rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
        abts_done_called = 0;

    /*
     * Return without dropping the kref so that the I/O request waits
     * until the ABTS completes. That kref is dropped in
     * qla24xx_abort_sp_done().
     */
    if (abts_done_called && io_wait_for_abort_done)
        return;
out:
    /* kref_get was done before the work was scheduled. */
    kref_put(&sp->cmd_kref, sp->put_fn);
}

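/* Transport ls_abort entry point: hand the abort off to a work item. */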
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct nvme_private *priv = fd->private;
    unsigned long flags;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    if (!priv->sp) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }

    if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

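/*
 * Transport ls_req entry point: DMA-map the LS payload and start an
 * SRB_NVME_LS srb for it.
 */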
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct qla_nvme_rport *qla_rport = rport->private;
    fc_port_t *fcport = qla_rport->fcport;
    struct srb_iocb   *nvme;
    struct nvme_private *priv = fd->private;
    struct scsi_qla_host *vha;
    int     rval = QLA_FUNCTION_FAILED;
    struct qla_hw_data *ha;
    srb_t           *sp;

    if (!fcport || fcport->deleted)
        return rval;

    vha = fcport->vha;
    ha = vha->hw;

    if (!ha->flags.fw_started)
        return rval;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
    if (!sp)
        return rval;

    sp->type = SRB_NVME_LS;
    sp->name = "nvme_ls";
    sp->done = qla_nvme_sp_ls_done;
    sp->put_fn = qla_nvme_release_ls_cmd_kref;
    sp->priv = priv;
    priv->sp = sp;
    kref_init(&sp->cmd_kref);
    spin_lock_init(&priv->cmd_lock);
    nvme = &sp->u.iocb_cmd;
    priv->fd = fd;
    nvme->u.nvme.desc = fd;
    nvme->u.nvme.dir = 0;
    nvme->u.nvme.dl = 0;
    nvme->u.nvme.cmd_len = fd->rqstlen;
    nvme->u.nvme.rsp_len = fd->rsplen;
    nvme->u.nvme.rsp_dma = fd->rspdma;
    nvme->u.nvme.timeout_sec = fd->timeout;
    nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
        fd->rqstlen, DMA_TO_DEVICE);
    dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
        fd->rqstlen, DMA_TO_DEVICE);

    sp->flags |= SRB_DMA_VALID;

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        wake_up(&sp->nvme_ls_waitq);
        sp->priv = NULL;
        priv->sp = NULL;
        qla_nvme_ls_unmap(sp, fd);
        qla2x00_rel_sp(sp);
        return rval;
    }

    return rval;
}

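/* Transport fcp_abort entry point; mirrors qla_nvme_ls_abort(). */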
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    struct nvme_private *priv = fd->private;
    unsigned long flags;

    spin_lock_irqsave(&priv->cmd_lock, flags);
    if (!priv->sp) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
        return;
    }
    spin_unlock_irqrestore(&priv->cmd_lock, flags);

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

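/*
 * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs
 * needed for extra data segments) on the qpair's request ring and ring
 * the doorbell. Runs under the qpair lock taken here.
 */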
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
    unsigned long   flags;
    uint32_t        *clr_ptr;
    uint32_t        handle;
    struct cmd_nvme *cmd_pkt;
    uint16_t        cnt, i;
    uint16_t        req_cnt;
    uint16_t        tot_dsds;
    uint16_t    avail_dsds;
    struct dsd64    *cur_dsd;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qpair = sp->qpair;
    struct srb_iocb *nvme = &sp->u.iocb_cmd;
    struct scatterlist *sgl, *sg;
    struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
    struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
    uint32_t        rval = QLA_SUCCESS;

    /* Setup qpair pointers */
    req = qpair->req;
    rsp = qpair->rsp;
    tot_dsds = fd->sg_cnt;

    /* Acquire qpair specific lock */
    spin_lock_irqsave(&qpair->qp_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0) {
        rval = -EBUSY;
        goto queuing_error;
    }
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length - (req->ring_index - cnt);

        if (req->cnt < (req_cnt + 2)) {
            rval = -EBUSY;
            goto queuing_error;
        }
    }

    if (unlikely(!fd->sqid)) {
        if (cmd->sqe.common.opcode == nvme_admin_async_event) {
            nvme->u.nvme.aen_op = 1;
            atomic_inc(&ha->nvme_active_aen_cnt);
        }
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    cmd_pkt->entry_status = 0;

    /* Update entry type to indicate Command NVME IOCB */
    cmd_pkt->entry_type = COMMAND_NVME;

    /* No data transfer; how do we check for buffer len == 0? */
    if (fd->io_dir == NVMEFC_FCP_READ) {
        cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
        qpair->counters.input_bytes += fd->payload_length;
        qpair->counters.input_requests++;
    } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
        cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
        if ((vha->flags.nvme_first_burst) &&
            (sp->fcport->nvme_prli_service_param &
            NVME_PRLI_SP_FIRST_BURST)) {
            if ((fd->payload_length <=
                sp->fcport->nvme_first_burst_size) ||
                (sp->fcport->nvme_first_burst_size == 0))
                cmd_pkt->control_flags |=
                    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
        }
        qpair->counters.output_bytes += fd->payload_length;
        qpair->counters.output_requests++;
    } else if (fd->io_dir == 0) {
        cmd_pkt->control_flags = 0;
    }

    if (sp->fcport->edif.enable && fd->io_dir != 0)
        cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

    /* Set BIT_13 of control flags for Async event */
    if (vha->flags.nvme2_enabled &&
        cmd->sqe.common.opcode == nvme_admin_async_event) {
        cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
    }

    /* Set NPORT-ID */
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    /* NVME RSP IU */
    cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
    put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

    /* NVME CMND IU */
    cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
    cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
    cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

    /* One DSD is available in the Command Type NVME IOCB */
    avail_dsds = 1;
    cur_dsd = &cmd_pkt->nvme_dsd;
    sgl = fd->first_sgl;

    /* Load data segments */
    for_each_sg(sgl, sg, tot_dsds, i) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */

            /* Adjust ring index */
            req->ring_index++;
            if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
            } else {
                req->ring_ptr++;
            }
            cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
            put_unaligned_le32(CONTINUE_A64_TYPE,
                       &cont_pkt->entry_type);

            cur_dsd = cont_pkt->dsd;
            avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }

    /* Set total entry count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    /* ignore nvme async cmd due to long timeout */
    if (!nvme->u.nvme.aen_op)
        sp->qpair->cmd_cnt++;

    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

queuing_error:
    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return rval;
}

/* Post an FCP command: the transport's fcp_io entry point. */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    fc_port_t *fcport;
    struct srb_iocb *nvme;
    struct scsi_qla_host *vha;
    int rval;
    srb_t *sp;
    struct qla_qpair *qpair = hw_queue_handle;
    struct nvme_private *priv = fd->private;
    struct qla_nvme_rport *qla_rport = rport->private;

    if (!priv) {
        /* nvme association has been torn down */
        return -ENODEV;
    }

    fcport = qla_rport->fcport;

    if (unlikely(!qpair || !fcport || fcport->deleted))
        return -EBUSY;

    if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
        return -ENODEV;

    vha = fcport->vha;

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
        return -EBUSY;

    /*
     * If we know the device is going away while the transport is still
     * sending I/Os, return busy to stall the I/O queue. This happens
     * when the link goes away and the firmware hasn't notified us yet,
     * but I/Os are still being returned. If the device comes back
     * quickly, we won't exhaust the I/O retry count at the core.
     */
    if (fcport->nvme_flag & NVME_FLAG_RESETTING)
        return -EBUSY;

    /* Alloc SRB structure */
    sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
    if (!sp)
        return -EBUSY;

    init_waitqueue_head(&sp->nvme_ls_waitq);
    kref_init(&sp->cmd_kref);
    spin_lock_init(&priv->cmd_lock);
    sp->priv = priv;
    priv->sp = sp;
    sp->type = SRB_NVME_CMD;
    sp->name = "nvme_cmd";
    sp->done = qla_nvme_sp_done;
    sp->put_fn = qla_nvme_release_fcp_cmd_kref;
    sp->qpair = qpair;
    sp->vha = vha;
    sp->cmd_sp = sp;
    nvme = &sp->u.iocb_cmd;
    nvme->u.nvme.desc = fd;

    rval = qla2x00_start_nvme_mq(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x212d,
            "qla2x00_start_nvme_mq failed = %d\n", rval);
        wake_up(&sp->nvme_ls_waitq);
        sp->priv = NULL;
        priv->sp = NULL;
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
    }

    return rval;
}

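/* Map blk-mq hardware queue contexts onto the PCI function's MSI-X vectors. */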
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
        struct blk_mq_queue_map *map)
{
    struct scsi_qla_host *vha = lport->private;
    int rc;

    rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
    if (rc)
        ql_log(ql_log_warn, vha, 0x21de,
               "pci map queue failed 0x%x", rc);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
    struct scsi_qla_host *vha = lport->private;

    ql_log(ql_log_info, vha, 0x210f,
        "localport delete of %p completed.\n", vha->nvme_local_port);
    vha->nvme_local_port = NULL;
    complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
    fc_port_t *fcport;
    struct qla_nvme_rport *qla_rport = rport->private;

    fcport = qla_rport->fcport;
    fcport->nvme_remote_port = NULL;
    fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
    fcport->nvme_flag &= ~NVME_FLAG_DELETING;
    ql_log(ql_log_info, fcport->vha, 0x2110,
        "remoteport_delete of %p %8phN completed.\n",
        fcport, fcport->port_name);
    complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
    .localport_delete = qla_nvme_localport_delete,
    .remoteport_delete = qla_nvme_remoteport_delete,
    .create_queue   = qla_nvme_alloc_queue,
    .delete_queue   = NULL,
    .ls_req     = qla_nvme_ls_req,
    .ls_abort   = qla_nvme_ls_abort,
    .fcp_io     = qla_nvme_post_cmd,
    .fcp_abort  = qla_nvme_fcp_abort,
    .map_queues = qla_nvme_map_queues,
    .max_hw_queues  = DEF_NVME_HW_QUEUES,
    .max_sgl_segments = 1024,
    .max_dif_sgl_segments = 64,
    .dma_boundary = 0xFFFFFFFF,
    .local_priv_sz  = 8,
    .remote_priv_sz = sizeof(struct qla_nvme_rport),
    .lsrqst_priv_sz = sizeof(struct nvme_private),
    .fcprqst_priv_sz = sizeof(struct nvme_private),
};

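/*
 * Unregister an fcport from the FC-NVMe transport and wait for the
 * remoteport_delete callback to complete.
 */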
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
    int ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    ql_log(ql_log_warn, fcport->vha, 0x2112,
        "%s: unregister remoteport on %p %8phN\n",
        __func__, fcport, fcport->port_name);

    if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
        nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

    init_completion(&fcport->nvme_del_done);
    ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
    if (ret)
        ql_log(ql_log_info, fcport->vha, 0x2114,
            "%s: Failed to unregister nvme_remote_port (%d)\n",
                __func__, ret);
    wait_for_completion(&fcport->nvme_del_done);
}

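/* Tear down the host's NVMe local port, if one was registered. */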
void qla_nvme_delete(struct scsi_qla_host *vha)
{
    int nv_ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    if (vha->nvme_local_port) {
        init_completion(&vha->nvme_del_done);
        ql_log(ql_log_info, vha, 0x2116,
            "unregister localport=%p\n",
            vha->nvme_local_port);
        nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
        if (nv_ret)
            ql_log(ql_log_info, vha, 0x2115,
                "Unregister of localport failed\n");
        else
            wait_for_completion(&vha->nvme_del_done);
    }
}

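/*
 * Register this host with the FC-NVMe transport. Clamps the number of
 * hardware queues advertised in the port template to the ql2xnvme_queues
 * module parameter and the available qpairs before calling
 * nvme_fc_register_localport().
 */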
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
    struct nvme_fc_port_template *tmpl;
    struct qla_hw_data *ha;
    struct nvme_fc_port_info pinfo;
    int ret = -EINVAL;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return ret;

    ha = vha->hw;
    tmpl = &qla_nvme_fc_transport;

    if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
        ql_log(ql_log_warn, vha, 0xfffd,
            "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
            ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
        ql2xnvme_queues = DEF_NVME_HW_QUEUES;
    } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
        ql_log(ql_log_warn, vha, 0xfffd,
               "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
               ql2xnvme_queues, (ha->max_qpairs - 1),
               (ha->max_qpairs - 1));
        ql2xnvme_queues = ha->max_qpairs - 1;
    }

    qla_nvme_fc_transport.max_hw_queues =
        min((uint8_t)(ql2xnvme_queues),
        (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

    ql_log(ql_log_info, vha, 0xfffb,
           "Number of NVME queues used for this port: %d\n",
        qla_nvme_fc_transport.max_hw_queues);

    pinfo.node_name = wwn_to_u64(vha->node_name);
    pinfo.port_name = wwn_to_u64(vha->port_name);
    pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
    pinfo.port_id = vha->d_id.b24;

    mutex_lock(&ha->vport_lock);
    /*
     * Check again for nvme_local_port to see if any other thread raced
     * with this one and finished registration.
     */
    if (!vha->nvme_local_port) {
        ql_log(ql_log_info, vha, 0xffff,
            "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
            pinfo.node_name, pinfo.port_name, pinfo.port_id);
        qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

        ret = nvme_fc_register_localport(&pinfo, tmpl,
                         get_device(&ha->pdev->dev),
                         &vha->nvme_local_port);
        mutex_unlock(&ha->vport_lock);
    } else {
        mutex_unlock(&ha->vport_lock);
        return 0;
    }
    if (ret) {
        ql_log(ql_log_warn, vha, 0xffff,
            "register_localport failed: ret=%x\n", ret);
    } else {
        vha->nvme_local_port->private = vha;
    }

    return ret;
}

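/*
 * When ABTS-wait is enabled, ask the firmware to retry the ABTS and to
 * enforce a driver-specified response timeout of 2 * R_A_TOV.
 */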
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
    struct qla_hw_data *ha;

    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;

    ha = orig_sp->fcport->vha->hw;

    WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
    /* Use Driver Specified Retry Count */
    abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
    abt->drv.abts_rty_cnt = cpu_to_le16(2);
    /* Use specified response timeout */
    abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
    /* set it to 2 * r_a_tov in secs */
    abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

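/* Log the completion status of an Abort I/O IOCB issued for an NVMe command. */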
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
    u16 comp_status;
    struct scsi_qla_host *vha;

    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;

    vha = orig_sp->fcport->vha;

    comp_status = le16_to_cpu(abt->comp_status);
    switch (comp_status) {
    case CS_RESET:      /* reset event aborted */
    case CS_ABORTED:    /* IOCB was cleaned */
    /* N_Port handle is not currently logged in */
    case CS_TIMEOUT:
    /* N_Port handle was logged out while waiting for ABTS to complete */
    case CS_PORT_UNAVAILABLE:
    /* Firmware found that the port name changed */
    case CS_PORT_LOGGED_OUT:
    /* Port configuration has changed */
    case CS_PORT_CONFIG_CHG:
        ql_dbg(ql_dbg_async, vha, 0xf09d,
               "Abort I/O IOCB completed with error, comp_status=%x\n",
               comp_status);
        break;

    /* BA_RJT was received for the ABTS */
    case CS_REJECT_RECEIVED:
        ql_dbg(ql_dbg_async, vha, 0xf09e,
               "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
               abt->fw.ba_rjt_vendorUnique);
        ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
               "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
               abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
        break;

    case CS_COMPLETE:
        ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
               "IOCB request is completed successfully comp_status=%x\n",
               comp_status);
        break;

    case CS_IOCB_ERROR:
        ql_dbg(ql_dbg_async, vha, 0xf0a0,
               "IOCB request is failed, comp_status=%x\n", comp_status);
        break;

    default:
        ql_dbg(ql_dbg_async, vha, 0xf0a1,
               "Invalid Abort IO IOCB Completion Status %x\n",
               comp_status);
        break;
    }
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
    if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
        return;
    kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}