/*
 * lpfc_nvme.c -- FC-NVMe initiator support for the Emulex LightPulse
 * Fibre Channel (lpfc) host driver.  This file implements the
 * nvme_fc_port_template downcalls that bind the driver's hardware
 * queues, LS, and FCP IO paths to the kernel's NVMe-over-FC transport.
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
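
/* NVME initiator-based functions.  The prototypes below are for local
 * routines referenced before they are defined; lpfc_nvme_template is
 * defined near the end of this file.
 */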

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Create and return a handle for an NVME queue
 * @pnvme_lport: Transport localport the queue is created for.
 * @qidx: A cpu index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Size of the queue in bytes.
 * @handle: An opaque driver handle returned for use in follow-up calls.
 *
 * The nvme transport calls this routine so the driver can bind @qidx to
 * one of its internal hardware queues (each of which maps to an MSI-X
 * vector and associated EQ/CQ/WQ).
 *
 * Return value :
 *   0 - Success
 *   -ENODEV - Port is unloading or flushing IO.
 *   -ENOMEM - Could not alloc the necessary memory.
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	if (!vport || vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;

	/*
	 * NVME qidx == 0 is the admin queue, so both the admin queue
	 * and the first IO queue use the hardware queue at index 0.
	 * IO queues after that are assigned sequentially, modulo the
	 * number of hardware queues advertised in the template.
	 */
	if (qidx) {
		str = "IO ";
		qhandle->index = ((qidx - 1) %
				  lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete the driver handle for an NVME queue
 * @pnvme_lport: Transport localport the queue was created for.
 * @qidx: A cpu index used to affinitize IO queues and MSI-X vectors.
 * @handle: The opaque driver handle from lpfc_nvme_create_queue.
 *
 * The nvme transport calls this routine to free the handle allocated
 * in lpfc_nvme_create_queue when an IO queue is torn down.
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			 lport, qidx, handle);
	kfree(handle);
}
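
/**
 * lpfc_nvme_localport_delete - Driver's localport delete downcall.
 * @localport: Pointer to the transport localport being deleted.
 *
 * The nvme transport calls this routine once the localport teardown
 * is complete; wake any thread waiting in lpfc_nvme_destroy_localport().
 **/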
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  The nvme transport calls this routine
 * when it has completed the unregistration of a previously registered
 * remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* The rport memory is owned by the transport.  Clear the NVME
	 * registration state for the node, then decide whether the node
	 * itself can be removed.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport x%px, ndlp x%px "
			 "DID x%x xflags x%x\n",
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* If neither the NVME nor SCSI transport still holds a
	 * registration on the node, remove it via the discovery
	 * state machine.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

rport_err:
	return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request.  Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * Returns 0 if the LS was handled and delivered to the transport
 * Returns 1 if the LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;

	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
 *         LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The routine frees the BPL buffer, calls the transport's done() routine,
 * then drops the node reference and releases the WQE used for the request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
	ndlp = cmdwqe->ndlp;
	buf_ptr = cmdwqe->bpl_dmabuf;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreq:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
			 ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (buf_ptr) {
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->bpl_dmabuf = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data x%px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->ndlp = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
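
/* Completion handler registered for NVME LS WQEs issued through
 * lpfc_nvme_ls_req().  Updates the lport statistics and then defers
 * to __lpfc_nvme_ls_req_cmp() for the common teardown.
 */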
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_iocbq *rspwqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}
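
/* lpfc_nvme_gen_req - Build and issue a GEN_REQUEST64 WQE carrying the
 * NVME LS payload described by the BPL in @bmp to the node @ndlp.  The
 * @cmpl routine is invoked on completion.  Returns 0 on success, 1 on
 * failure to allocate or queue the WQE.
 */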
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->bpl_dmabuf = bmp;
	genwqe->cmd_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->ndlp = lpfc_nlp_get(ndlp);
	if (!genwqe->ndlp) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context_un.nvme_lsreq = pnvme_lsreq;

	/* Default the timeout when the caller did not supply one */
	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->num_bdes = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Save completion context, then issue the WQE */
	genwqe->cmd_cmpl = cmpl;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}

/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_iocbq *rspwqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * The transport supplies the request and response DMA addresses
	 * but no buffer pointer list.  Build a two-entry BPL (request
	 * BDE followed by response BDE) in a driver mbuf so the
	 * GEN_REQUEST WQE issued below can reference both payloads.
	 */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS in the driver's
 * WQE pending list, and aborts the wqe if found.
 *
 * Return value:
 *   0 : if the LS found and aborted
 *   non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the NVME LS ring txcmplq and look for the wqe that matches
	 * this LS. If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}
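
/* lpfc_nvme_xmt_ls_rsp - Transmit a response for a prior unsolicited
 * NVME LS request that was delivered via lpfc_nvme_handle_lsreq().
 * Defers to the common __lpfc_nvme_xmt_ls_rsp() service routine.
 */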
static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's point of view).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  If cfg_nvme_embed_cmd is set, the command IU
	 * is embedded in the WQE itself; otherwise it is referenced
	 * by an inline BDE using the transport's DMA addresses.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;	/* Word 16 */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr = *dptr;			/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 - dbde is 1, wqes is 0 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

/**
 * lpfc_nvme_io_cmd_cmpl - Complete one NVME-over-FCP IO
 * @phba: Pointer to HBA context object.
 * @pwqeIn: Pointer to command WQE object.
 * @pwqeOut: Pointer to response WQE object.
 *
 * Completion handler for NVME FCP commands.  For a good completion the
 * routine synthesizes the NVME ERSP IU from the CQE contents; otherwise
 * it translates the WCQE status into transport status, then calls the
 * transport done() routine and releases the IO buffer.
 **/
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      struct lpfc_iocbq *pwqeOut)
{
	struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
	uint32_t lat;
	bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif
	int offline = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl. No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* Get the NVME cmd details for this unique error. */
			cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
			ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

			/* Check if this is really an ERSP */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
					 "6084 NVME FCP_ERR ERSP: "
					 "xri %x placed x%x opcode x%x cmd_id "
					 "x%x cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 wcqe->total_data_placed,
					 cp->sqe.common.opcode,
					 cp->sqe.common.command_id,
					 ep->cqe.status);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x opcode x%x cmd_id x%x, "
					 "cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed,
					 cp->sqe.common.opcode,
					 cp->sqe.common.command_id,
					 ep->cqe.status);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd x%px "
						 "nvme cmd x%px, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
			offline = pci_channel_offline(vport->phba->pcidev);
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes; defer done() while the exchange is still busy.
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		call_done = true;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	/* Check if IO qualified for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    nCmd->io_dir == NVMEFC_FCP_READ &&
	    nCmd->payload_length) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
	}

	if (call_done)
		nCmd->done(nCmd);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_prep_io_cmd - Initialize the WQE for an NVME FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * Initializes the WQE for an IO from one of the iread/iwrite/icmnd
 * command templates, based on the transfer direction and scatter-gather
 * count of the request.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct nvme_common_command *sqe;
	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}

	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
		sqe = &((struct nvme_fc_cmd_iu *)
			nCmd->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_async_event)
			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the request buffer.
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	/* add the VMID tags as per switch response */
	if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
		} else {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
		}
	}

	pwqeq->vport = vport;
	return 0;
}

/**
 * lpfc_nvme_prep_io_dma - Build the SGL for an NVME FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command
 *
 * Walks the transport's scatter-gather list and formats the hardware
 * SGEs for the IO, chaining additional per-hdwq SGL pages where the
 * segment count crosses an SGE boundary.
 *
 * Return value :
 *   0 - Success
 *   1 - Error
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single nvme command.  Just run through the seg_cnt
		 * and format the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if (nseg == 1) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(data_sg);
				dma_len = sg_dma_len(data_sg);
				sgl->addr_lo = cpu_to_le32(
							 putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
							putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);

				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/* PBDE support for first data SGE only */
		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno - Failure
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
	uint64_t start = 0;
#if (IS_ENABLED(CONFIG_NVME_FC))
	u8 *uuid = NULL;
	int err;
	enum dma_data_direction iodir;
#endif

	/* Validate pointers.  Fault handling with the transport during
	 * unloading can leave a window where the lport private area is
	 * not yet, or no longer, valid.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
	    phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0.
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
		/* Get start time for IO latency */
		start = ktime_get_ns();
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail1;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Store the cross-references needed to issue, abort, and
	 * complete this IO:  the nvme buffer in the transport request
	 * private area and the transport request in the nvme buffer.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

#if (IS_ENABLED(CONFIG_NVME_FC))
	/* check the necessary and sufficient condition to support VMID */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
		/* is the IO generic or from a VM: fetch the VM UUID, if any */
		uuid = nvme_fc_io_getuuid(pnvme_fcreq);

		if (uuid) {
			if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
				iodir = DMA_TO_DEVICE;
			else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
				iodir = DMA_FROM_DEVICE;
			else
				iodir = DMA_NONE;

			err = lpfc_vmid_get_appid(vport, uuid, iodir,
						  (union lpfc_vmid_io_tag *)
						  &lpfc_ncmd->cur_iocbq.vmid_tag);
			if (!err)
				lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
		}
	}
#endif

	/*
	 * The IO was scheduled onto hardware queue @idx above.  Record
	 * the index in the WQE so completion statistics are charged to
	 * the same queue, then finish the WQE and SGL initialization.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
out_fail:
	return ret;
}

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
			get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its abort handler.  This
 * routine issues an abort WQE for the IO described by @pnvme_fcreq
 * to the rport indicated in @pnvme_rport.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;
	struct nvme_fc_cmd_iu *cp;

	/* Validate pointers.  Fault handling with the transport during
	 * unloading can leave a window where the lport private area is
	 * not yet, or no longer, valid.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport to catch aborts from anywhere else.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort request? */
	if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd x%px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);

	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	/*
	 * Log the NVMe opcode and command id of the IO being aborted.
	 */
	cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
			 nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
			 cp->sqe.common.command_id);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}
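
/* Declare and initialize an instance of the FC NVME template. */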
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req = lpfc_nvme_ls_req,
	.fcp_io = lpfc_nvme_fcp_io_submit,
	.ls_abort = lpfc_nvme_ls_abort,
	.fcp_abort = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};

/**
 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @idx: NVME buffer index.
 * @expedite: Flag to honor the reserve pool when the normal pool is empty.
 *
 * This routine removes a nvme buffer from head of @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->cmd_flag = LPFC_IO_NVME;
		pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Mark the first SGE as a skip entry; the cmd and rsp
		 * SGEs are filled in at IO prep time by
		 * lpfc_nvme_adj_fcp_sgls().
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* Initialize 64 bytes only */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}

/**
 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of
 * @phba lpfc_io_buf_list list.  For SLI4, XRIs are tied to the nvme
 * buffer and cannot be reused for at least RA_TOV amount of time if the
 * IO was aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}

/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * the port to the nvme transport.  No device structures are allocated
 * by the transport's registration routine but the remaining lport
 * initialization is done by the driver.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Bind the driver's lport private area to the vport. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		/* Initialize the lport statistics counters. */
		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}

#if (IS_ENABLED(CONFIG_NVME_FC))
/**
 * lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 * @vport: pointer to a host virtual N_Port data structure
 * @lport: Pointer to the lpfc-local lport data
 * @lport_unreg_cmp: Pointer to the completion the transport signals when
 *                   the localport delete downcall has run.
 *
 * This routine waits for the localport unregistration to complete,
 * logging and renewing the wait whenever a timeout interval expires with
 * IO still outstanding.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				if (!vport->localport || !qp || !qp->io_wq)
					return;

				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			if (!vport->localport ||
			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
			    vport->load_flag & FC_UNLOADING)
				return;

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif

/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	if (!localport)
		return;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion.  This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the outcome, the driver is done with the local port.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}
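
/**
 * lpfc_nvme_update_localport - Update attributes of the bound localport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * After a link event changes the vport's DID, refresh the localport's
 * port_id and role so the transport sees the current fabric address.
 **/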
2348
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
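
	/* Note: a DID of 0 means the vport has not completed fabric login, so
	 * only the NVME discovery role is claimed above; once a DID is
	 * assigned the port acts as a full NVME initiator.
	 */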

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}

int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.  Just register this
	 * instance.  The driver communicates port role capabilities
	 * consistent with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
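
	/* Note: srport is the SCSI-transport fc_rport bound to this node.
	 * When present, its dev_loss_tmo (tunable through the fc_remote_ports
	 * sysfs tree) is preferred over the vport default so both transports
	 * age the remote port out on the same schedule.
	 */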

	spin_lock_irq(&ndlp->lock);

	/* If an oldrport exists, so does the ndlp reference.  If not, a new
	 * reference is needed because either the node has never been
	 * registered or it's been unregistered and is being deleted.
	 */
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just a resume
		 * of the existing rport; else this is a brand new rport.
		 * Guard against an unregister/reregister race that leaves
		 * the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {
			/* Sever the ndlp<->rport association before
			 * dropping the ndlp ref taken by register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* The reference is only removed if the previous NDLP
			 * is no longer active; this might be just a swap, and
			 * dropping the reference would cause a premature
			 * cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}


/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport.
 *
 * If the ndlp represents an NVME Target that we are logged into, ping the
 * NVME FC Transport layer to initiate a device rescan on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Rescan an NVME target in MAPPED state with the DISCOVERY role set. */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}


/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of devloss or rport recovery from the current
 * nvme transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is a fundamental error; the localport is always available
	 * until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type.  Only call for NVME ports.  Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport;
		 * the transport will update it.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
		spin_unlock_irq(&vport->phba->hbalock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport.  The vport is unloading; there is no
		 * recovery.  The return value is ignored - the upcall is a
		 * courtesy to the transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The driver no longer knows if the nrport memory is valid
		 * because the controller teardown process has begun and is
		 * asynchronous.  Break the binding in the ndlp and remove
		 * the register reference so node release can proceed.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}

/**
 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine completes an NVME IO whose exchange was aborted because the
 * PCI device went offline.  The IO is returned to the transport with an
 * internal error status and its buffer is released.
 */
void
lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_ncmd)
{
	struct nvmefc_fcp_req *nvme_cmd = NULL;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6533 %s nvme_cmd %p tag x%x abort complete and "
			"xri released\n", __func__,
			lpfc_ncmd->nvmeCmd,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->transferred_length = 0;
		nvme_cmd->rcv_rsplen = 0;
		nvme_cmd->status = NVME_SC_INTERNAL;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the
 * transport here.
 */
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
			"xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function polls each hardware queue's txcmplq until every outstanding
 * NVME wqe has completed.  It does not abort the pending IOs; it just
 * waits, logging a message every ten seconds while IO remains.  Called
 * with no lock held.
 */
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Wait for everything on the txcmplq to complete. */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep interval is 10ms, so every ten seconds
			 * of waiting, dump a message - something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure the HBA is alive. */
	lpfc_issue_hb_tmo(phba);
}

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = pwqeIn->io_buf;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
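
	/* Note: an ABORT_XRI_CX wqe carries no NVME command context of its
	 * own, so it is released above rather than completed back to the
	 * transport.
	 */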

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->total_data_placed = 0;
	wcqep->word3 = 0;

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);

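	/* Fake a WCQE completion: copy the synthesized status into the job's
	 * wcqe_cmpl and invoke the normal completion handler so the IO is
	 * cleaned up through the standard path.
	 */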
	memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}