0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023 #include <linux/pci.h>
0024 #include <linux/slab.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/delay.h>
0027 #include <asm/unaligned.h>
0028 #include <linux/crc-t10dif.h>
0029 #include <net/checksum.h>
0030
0031 #include <scsi/scsi.h>
0032 #include <scsi/scsi_device.h>
0033 #include <scsi/scsi_eh.h>
0034 #include <scsi/scsi_host.h>
0035 #include <scsi/scsi_tcq.h>
0036 #include <scsi/scsi_transport_fc.h>
0037 #include <scsi/fc/fc_fs.h>
0038
0039 #include "lpfc_version.h"
0040 #include "lpfc_hw4.h"
0041 #include "lpfc_hw.h"
0042 #include "lpfc_sli.h"
0043 #include "lpfc_sli4.h"
0044 #include "lpfc_nl.h"
0045 #include "lpfc_disc.h"
0046 #include "lpfc.h"
0047 #include "lpfc_scsi.h"
0048 #include "lpfc_nvme.h"
0049 #include "lpfc_logmsg.h"
0050 #include "lpfc_crtn.h"
0051 #include "lpfc_vport.h"
0052 #include "lpfc_debugfs.h"
0053
0054 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
0055 struct lpfc_async_xchg_ctx *,
0056 dma_addr_t rspbuf,
0057 uint16_t rspsize);
0058 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
0059 struct lpfc_async_xchg_ctx *);
0060 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
0061 struct lpfc_async_xchg_ctx *,
0062 uint32_t, uint16_t);
0063 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
0064 struct lpfc_async_xchg_ctx *,
0065 uint32_t, uint16_t);
0066 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
0067 struct lpfc_async_xchg_ctx *);
0068 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
0069
0070 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
0071
0072 static union lpfc_wqe128 lpfc_tsend_cmd_template;
0073 static union lpfc_wqe128 lpfc_treceive_cmd_template;
0074 static union lpfc_wqe128 lpfc_trsp_cmd_template;
0075
0076
0077 void
0078 lpfc_nvmet_cmd_template(void)
0079 {
0080 union lpfc_wqe128 *wqe;
0081
0082
0083 wqe = &lpfc_tsend_cmd_template;
0084 memset(wqe, 0, sizeof(union lpfc_wqe128));
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
0098 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
0099 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
0100 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
0101 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
0102
0103
0104
0105
0106
0107
0108 bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
0109 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
0110 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
0111 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
0112 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
0113 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
0114
0115
0116 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
0117 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
0118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
0119 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
0120 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
0121 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
0122
0123
0124
0125
0126
0127
0128 wqe = &lpfc_treceive_cmd_template;
0129 memset(wqe, 0, sizeof(union lpfc_wqe128));
0130
0131
0132
0133
0134 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
0135
0136
0137
0138
0139
0140
0141
0142
0143 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
0144 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
0145 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
0146 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
0147 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
0148
0149
0150
0151
0152
0153
0154 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
0155 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
0156 bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
0157 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
0158 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
0159 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
0160
0161
0162 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
0163 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
0164 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
0165 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
0166 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
0167 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
0168
0169
0170
0171
0172
0173
0174 wqe = &lpfc_trsp_cmd_template;
0175 memset(wqe, 0, sizeof(union lpfc_wqe128));
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
0187 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
0188 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
0189 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
0190 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
0191
0192
0193
0194
0195
0196
0197 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
0198 bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
0199 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
0200 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
0201 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
0202 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
0203
0204
0205 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
0206 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
0207 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
0208 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
0209 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
0210 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
0211
0212
0213 }
0214
0215 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
0216 static struct lpfc_async_xchg_ctx *
0217 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
0218 {
0219 struct lpfc_async_xchg_ctx *ctxp;
0220 unsigned long iflag;
0221 bool found = false;
0222
0223 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
0224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
0225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
0226 continue;
0227
0228 found = true;
0229 break;
0230 }
0231 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
0232 if (found)
0233 return ctxp;
0234
0235 return NULL;
0236 }
0237
0238 static struct lpfc_async_xchg_ctx *
0239 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
0240 {
0241 struct lpfc_async_xchg_ctx *ctxp;
0242 unsigned long iflag;
0243 bool found = false;
0244
0245 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
0246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
0247 if (ctxp->oxid != oxid || ctxp->sid != sid)
0248 continue;
0249
0250 found = true;
0251 break;
0252 }
0253 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
0254 if (found)
0255 return ctxp;
0256
0257 return NULL;
0258 }
0259 #endif
0260
0261 static void
0262 lpfc_nvmet_defer_release(struct lpfc_hba *phba,
0263 struct lpfc_async_xchg_ctx *ctxp)
0264 {
0265 lockdep_assert_held(&ctxp->ctxlock);
0266
0267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
0268 "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
0269 ctxp->oxid, ctxp->flag);
0270
0271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
0272 return;
0273
0274 ctxp->flag |= LPFC_NVME_CTX_RLS;
0275 spin_lock(&phba->sli4_hba.t_active_list_lock);
0276 list_del(&ctxp->list);
0277 spin_unlock(&phba->sli4_hba.t_active_list_lock);
0278 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
0279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
0280 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
0281 }
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294 void
0295 __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
0296 struct lpfc_iocbq *rspwqe)
0297 {
0298 struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
0299 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
0300 struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
0301 uint32_t status, result;
0302
0303 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
0304 result = wcqe->parameter;
0305
0306 if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
0307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0308 "6410 NVMEx LS cmpl state mismatch IO x%x: "
0309 "%d %d\n",
0310 axchg->oxid, axchg->state, axchg->entry_cnt);
0311 }
0312
0313 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
0314 axchg->oxid, status, result);
0315
0316 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
0317 "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
0318 status, result, axchg->oxid);
0319
0320 lpfc_nlp_put(cmdwqe->ndlp);
0321 cmdwqe->context_un.axchg = NULL;
0322 cmdwqe->bpl_dmabuf = NULL;
0323 lpfc_sli_release_iocbq(phba, cmdwqe);
0324 ls_rsp->done(ls_rsp);
0325 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
0326 "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
0327 status, axchg->oxid);
0328 kfree(axchg);
0329 }
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342 static void
0343 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
0344 struct lpfc_iocbq *rspwqe)
0345 {
0346 struct lpfc_nvmet_tgtport *tgtp;
0347 uint32_t status, result;
0348 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
0349
0350 if (!phba->targetport)
0351 goto finish;
0352
0353 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
0354 result = wcqe->parameter;
0355
0356 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
0357 if (tgtp) {
0358 if (status) {
0359 atomic_inc(&tgtp->xmt_ls_rsp_error);
0360 if (result == IOERR_ABORT_REQUESTED)
0361 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
0362 if (bf_get(lpfc_wcqe_c_xb, wcqe))
0363 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
0364 } else {
0365 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
0366 }
0367 }
0368
0369 finish:
0370 __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
0371 }
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385 void
0386 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
0387 {
0388 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
0389 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
0390 struct lpfc_nvmet_tgtport *tgtp;
0391 struct fc_frame_header *fc_hdr;
0392 struct rqb_dmabuf *nvmebuf;
0393 struct lpfc_nvmet_ctx_info *infop;
0394 uint32_t size, oxid, sid;
0395 int cpu;
0396 unsigned long iflag;
0397
0398 if (ctxp->state == LPFC_NVME_STE_FREE) {
0399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0400 "6411 NVMET free, already free IO x%x: %d %d\n",
0401 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
0402 }
0403
0404 if (ctxp->rqb_buffer) {
0405 spin_lock_irqsave(&ctxp->ctxlock, iflag);
0406 nvmebuf = ctxp->rqb_buffer;
0407
0408 if (nvmebuf) {
0409 ctxp->rqb_buffer = NULL;
0410 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
0411 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
0412 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
0413 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
0414 nvmebuf);
0415 } else {
0416 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
0417
0418 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
0419 }
0420 } else {
0421 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
0422 }
0423 }
0424 ctxp->state = LPFC_NVME_STE_FREE;
0425
0426 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
0427 if (phba->sli4_hba.nvmet_io_wait_cnt) {
0428 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
0429 nvmebuf, struct rqb_dmabuf,
0430 hbuf.list);
0431 phba->sli4_hba.nvmet_io_wait_cnt--;
0432 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
0433 iflag);
0434
0435 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
0436 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
0437 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
0438 size = nvmebuf->bytes_recv;
0439 sid = sli4_sid_from_fc_hdr(fc_hdr);
0440
0441 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
0442 ctxp->wqeq = NULL;
0443 ctxp->offset = 0;
0444 ctxp->phba = phba;
0445 ctxp->size = size;
0446 ctxp->oxid = oxid;
0447 ctxp->sid = sid;
0448 ctxp->state = LPFC_NVME_STE_RCV;
0449 ctxp->entry_cnt = 1;
0450 ctxp->flag = 0;
0451 ctxp->ctxbuf = ctx_buf;
0452 ctxp->rqb_buffer = (void *)nvmebuf;
0453 spin_lock_init(&ctxp->ctxlock);
0454
0455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0456
0457 if (ctxp->ts_isr_cmd) {
0458 ctxp->ts_cmd_nvme = 0;
0459 ctxp->ts_nvme_data = 0;
0460 ctxp->ts_data_wqput = 0;
0461 ctxp->ts_isr_data = 0;
0462 ctxp->ts_data_nvme = 0;
0463 ctxp->ts_nvme_status = 0;
0464 ctxp->ts_status_wqput = 0;
0465 ctxp->ts_isr_status = 0;
0466 ctxp->ts_status_nvme = 0;
0467 }
0468 #endif
0469 atomic_inc(&tgtp->rcv_fcp_cmd_in);
0470
0471
0472 spin_lock_irqsave(&ctxp->ctxlock, iflag);
0473 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
0474 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
0475
0476 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
0477 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
0478 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0479 "6181 Unable to queue deferred work "
0480 "for oxid x%x. "
0481 "FCP Drop IO [x%x x%x x%x]\n",
0482 ctxp->oxid,
0483 atomic_read(&tgtp->rcv_fcp_cmd_in),
0484 atomic_read(&tgtp->rcv_fcp_cmd_out),
0485 atomic_read(&tgtp->xmt_fcp_release));
0486
0487 spin_lock_irqsave(&ctxp->ctxlock, iflag);
0488 lpfc_nvmet_defer_release(phba, ctxp);
0489 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
0490 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
0491 }
0492 return;
0493 }
0494 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
0495
0496
0497
0498
0499
0500 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
0501 list_del_init(&ctxp->list);
0502 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
0503 cpu = raw_smp_processor_id();
0504 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
0505 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
0506 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
0507 infop->nvmet_ctx_list_cnt++;
0508 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
0509 #endif
0510 }
0511
0512 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0513 static void
0514 lpfc_nvmet_ktime(struct lpfc_hba *phba,
0515 struct lpfc_async_xchg_ctx *ctxp)
0516 {
0517 uint64_t seg1, seg2, seg3, seg4, seg5;
0518 uint64_t seg6, seg7, seg8, seg9, seg10;
0519 uint64_t segsum;
0520
0521 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
0522 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
0523 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
0524 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
0525 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
0526 return;
0527
0528 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
0529 return;
0530 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
0531 return;
0532 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
0533 return;
0534 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
0535 return;
0536 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
0537 return;
0538 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
0539 return;
0540 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
0541 return;
0542 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
0543 return;
0544 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
0545 return;
0546 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
0547 return;
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
0574 segsum = seg1;
0575
0576 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
0577 if (segsum > seg2)
0578 return;
0579 seg2 -= segsum;
0580 segsum += seg2;
0581
0582 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
0583 if (segsum > seg3)
0584 return;
0585 seg3 -= segsum;
0586 segsum += seg3;
0587
0588 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
0589 if (segsum > seg4)
0590 return;
0591 seg4 -= segsum;
0592 segsum += seg4;
0593
0594 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
0595 if (segsum > seg5)
0596 return;
0597 seg5 -= segsum;
0598 segsum += seg5;
0599
0600
0601
0602 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
0603 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
0604 if (segsum > seg6)
0605 return;
0606 seg6 -= segsum;
0607 segsum += seg6;
0608
0609 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
0610 if (segsum > seg7)
0611 return;
0612 seg7 -= segsum;
0613 segsum += seg7;
0614
0615 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
0616 if (segsum > seg8)
0617 return;
0618 seg8 -= segsum;
0619 segsum += seg8;
0620
0621 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
0622 if (segsum > seg9)
0623 return;
0624 seg9 -= segsum;
0625 segsum += seg9;
0626
0627 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
0628 return;
0629 seg10 = (ctxp->ts_isr_status -
0630 ctxp->ts_isr_cmd);
0631 } else {
0632 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
0633 return;
0634 seg6 = 0;
0635 seg7 = 0;
0636 seg8 = 0;
0637 seg9 = 0;
0638 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
0639 }
0640
0641 phba->ktime_seg1_total += seg1;
0642 if (seg1 < phba->ktime_seg1_min)
0643 phba->ktime_seg1_min = seg1;
0644 else if (seg1 > phba->ktime_seg1_max)
0645 phba->ktime_seg1_max = seg1;
0646
0647 phba->ktime_seg2_total += seg2;
0648 if (seg2 < phba->ktime_seg2_min)
0649 phba->ktime_seg2_min = seg2;
0650 else if (seg2 > phba->ktime_seg2_max)
0651 phba->ktime_seg2_max = seg2;
0652
0653 phba->ktime_seg3_total += seg3;
0654 if (seg3 < phba->ktime_seg3_min)
0655 phba->ktime_seg3_min = seg3;
0656 else if (seg3 > phba->ktime_seg3_max)
0657 phba->ktime_seg3_max = seg3;
0658
0659 phba->ktime_seg4_total += seg4;
0660 if (seg4 < phba->ktime_seg4_min)
0661 phba->ktime_seg4_min = seg4;
0662 else if (seg4 > phba->ktime_seg4_max)
0663 phba->ktime_seg4_max = seg4;
0664
0665 phba->ktime_seg5_total += seg5;
0666 if (seg5 < phba->ktime_seg5_min)
0667 phba->ktime_seg5_min = seg5;
0668 else if (seg5 > phba->ktime_seg5_max)
0669 phba->ktime_seg5_max = seg5;
0670
0671 phba->ktime_data_samples++;
0672 if (!seg6)
0673 goto out;
0674
0675 phba->ktime_seg6_total += seg6;
0676 if (seg6 < phba->ktime_seg6_min)
0677 phba->ktime_seg6_min = seg6;
0678 else if (seg6 > phba->ktime_seg6_max)
0679 phba->ktime_seg6_max = seg6;
0680
0681 phba->ktime_seg7_total += seg7;
0682 if (seg7 < phba->ktime_seg7_min)
0683 phba->ktime_seg7_min = seg7;
0684 else if (seg7 > phba->ktime_seg7_max)
0685 phba->ktime_seg7_max = seg7;
0686
0687 phba->ktime_seg8_total += seg8;
0688 if (seg8 < phba->ktime_seg8_min)
0689 phba->ktime_seg8_min = seg8;
0690 else if (seg8 > phba->ktime_seg8_max)
0691 phba->ktime_seg8_max = seg8;
0692
0693 phba->ktime_seg9_total += seg9;
0694 if (seg9 < phba->ktime_seg9_min)
0695 phba->ktime_seg9_min = seg9;
0696 else if (seg9 > phba->ktime_seg9_max)
0697 phba->ktime_seg9_max = seg9;
0698 out:
0699 phba->ktime_seg10_total += seg10;
0700 if (seg10 < phba->ktime_seg10_min)
0701 phba->ktime_seg10_min = seg10;
0702 else if (seg10 > phba->ktime_seg10_max)
0703 phba->ktime_seg10_max = seg10;
0704 phba->ktime_status_samples++;
0705 }
0706 #endif
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718 static void
0719 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
0720 struct lpfc_iocbq *rspwqe)
0721 {
0722 struct lpfc_nvmet_tgtport *tgtp;
0723 struct nvmefc_tgt_fcp_req *rsp;
0724 struct lpfc_async_xchg_ctx *ctxp;
0725 uint32_t status, result, op, logerr;
0726 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
0727 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0728 int id;
0729 #endif
0730
0731 ctxp = cmdwqe->context_un.axchg;
0732 ctxp->flag &= ~LPFC_NVME_IO_INP;
0733
0734 rsp = &ctxp->hdlrctx.fcp_req;
0735 op = rsp->op;
0736
0737 status = bf_get(lpfc_wcqe_c_status, wcqe);
0738 result = wcqe->parameter;
0739
0740 if (phba->targetport)
0741 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
0742 else
0743 tgtp = NULL;
0744
0745 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
0746 ctxp->oxid, op, status);
0747
0748 if (status) {
0749 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
0750 rsp->transferred_length = 0;
0751 if (tgtp) {
0752 atomic_inc(&tgtp->xmt_fcp_rsp_error);
0753 if (result == IOERR_ABORT_REQUESTED)
0754 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
0755 }
0756
0757 logerr = LOG_NVME_IOERR;
0758
0759
0760 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
0761 ctxp->flag |= LPFC_NVME_XBUSY;
0762 logerr |= LOG_NVME_ABTS;
0763 if (tgtp)
0764 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
0765
0766 } else {
0767 ctxp->flag &= ~LPFC_NVME_XBUSY;
0768 }
0769
0770 lpfc_printf_log(phba, KERN_INFO, logerr,
0771 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
0772 "XBUSY:x%x\n",
0773 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
0774 status, result, ctxp->flag);
0775
0776 } else {
0777 rsp->fcp_error = NVME_SC_SUCCESS;
0778 if (op == NVMET_FCOP_RSP)
0779 rsp->transferred_length = rsp->rsplen;
0780 else
0781 rsp->transferred_length = rsp->transfer_length;
0782 if (tgtp)
0783 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
0784 }
0785
0786 if ((op == NVMET_FCOP_READDATA_RSP) ||
0787 (op == NVMET_FCOP_RSP)) {
0788
0789 ctxp->state = LPFC_NVME_STE_DONE;
0790 ctxp->entry_cnt++;
0791
0792 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0793 if (ctxp->ts_cmd_nvme) {
0794 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
0795 ctxp->ts_isr_data =
0796 cmdwqe->isr_timestamp;
0797 ctxp->ts_data_nvme =
0798 ktime_get_ns();
0799 ctxp->ts_nvme_status =
0800 ctxp->ts_data_nvme;
0801 ctxp->ts_status_wqput =
0802 ctxp->ts_data_nvme;
0803 ctxp->ts_isr_status =
0804 ctxp->ts_data_nvme;
0805 ctxp->ts_status_nvme =
0806 ctxp->ts_data_nvme;
0807 } else {
0808 ctxp->ts_isr_status =
0809 cmdwqe->isr_timestamp;
0810 ctxp->ts_status_nvme =
0811 ktime_get_ns();
0812 }
0813 }
0814 #endif
0815 rsp->done(rsp);
0816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0817 if (ctxp->ts_cmd_nvme)
0818 lpfc_nvmet_ktime(phba, ctxp);
0819 #endif
0820
0821 } else {
0822 ctxp->entry_cnt++;
0823 memset_startat(cmdwqe, 0, cmd_flag);
0824 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0825 if (ctxp->ts_cmd_nvme) {
0826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
0827 ctxp->ts_data_nvme = ktime_get_ns();
0828 }
0829 #endif
0830 rsp->done(rsp);
0831 }
0832 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0833 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
0834 id = raw_smp_processor_id();
0835 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
0836 if (ctxp->cpu != id)
0837 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
0838 "6704 CPU Check cmdcmpl: "
0839 "cpu %d expect %d\n",
0840 id, ctxp->cpu);
0841 }
0842 #endif
0843 }
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860
0861 int
0862 __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
0863 struct nvmefc_ls_rsp *ls_rsp,
0864 void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
0865 struct lpfc_iocbq *cmdwqe,
0866 struct lpfc_iocbq *rspwqe))
0867 {
0868 struct lpfc_hba *phba = axchg->phba;
0869 struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
0870 struct lpfc_iocbq *nvmewqeq;
0871 struct lpfc_dmabuf dmabuf;
0872 struct ulp_bde64 bpl;
0873 int rc;
0874
0875 if (phba->pport->load_flag & FC_UNLOADING)
0876 return -ENODEV;
0877
0878 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
0879 "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
0880
0881 if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
0882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0883 "6412 NVMEx LS rsp state mismatch "
0884 "oxid x%x: %d %d\n",
0885 axchg->oxid, axchg->state, axchg->entry_cnt);
0886 return -EALREADY;
0887 }
0888 axchg->state = LPFC_NVME_STE_LS_RSP;
0889 axchg->entry_cnt++;
0890
0891 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
0892 ls_rsp->rsplen);
0893 if (nvmewqeq == NULL) {
0894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0895 "6150 NVMEx LS Drop Rsp x%x: Prep\n",
0896 axchg->oxid);
0897 rc = -ENOMEM;
0898 goto out_free_buf;
0899 }
0900
0901
0902 nvmewqeq->num_bdes = 1;
0903 nvmewqeq->hba_wqidx = 0;
0904 nvmewqeq->bpl_dmabuf = &dmabuf;
0905 dmabuf.virt = &bpl;
0906 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
0907 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
0908 bpl.tus.f.bdeSize = ls_rsp->rsplen;
0909 bpl.tus.f.bdeFlags = 0;
0910 bpl.tus.w = le32_to_cpu(bpl.tus.w);
0911
0912
0913
0914
0915
0916
0917 nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
0918 nvmewqeq->context_un.axchg = axchg;
0919
0920 lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
0921 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
0922
0923 rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
0924
0925
0926 nvmewqeq->bpl_dmabuf = NULL;
0927
0928 if (rc == WQE_SUCCESS) {
0929
0930
0931
0932
0933 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
0934 return 0;
0935 }
0936
0937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0938 "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
0939 axchg->oxid, rc);
0940
0941 rc = -ENXIO;
0942
0943 lpfc_nlp_put(nvmewqeq->ndlp);
0944
0945 out_free_buf:
0946
0947 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
0948
0949
0950
0951
0952
0953
0954
0955
0956 lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
0957 return rc;
0958 }
0959
0960
0961
0962
0963
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977
0978 static int
0979 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
0980 struct nvmefc_ls_rsp *ls_rsp)
0981 {
0982 struct lpfc_async_xchg_ctx *axchg =
0983 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
0984 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
0985 int rc;
0986
0987 if (axchg->phba->pport->load_flag & FC_UNLOADING)
0988 return -ENODEV;
0989
0990 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
0991
0992 if (rc) {
0993 atomic_inc(&nvmep->xmt_ls_drop);
0994
0995
0996
0997
0998
0999 if (rc != -EALREADY)
1000 atomic_inc(&nvmep->xmt_ls_abort);
1001 return rc;
1002 }
1003
1004 atomic_inc(&nvmep->xmt_ls_rsp);
1005 return 0;
1006 }
1007
1008 static int
1009 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1010 struct nvmefc_tgt_fcp_req *rsp)
1011 {
1012 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1013 struct lpfc_async_xchg_ctx *ctxp =
1014 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1015 struct lpfc_hba *phba = ctxp->phba;
1016 struct lpfc_queue *wq;
1017 struct lpfc_iocbq *nvmewqeq;
1018 struct lpfc_sli_ring *pring;
1019 unsigned long iflags;
1020 int rc;
1021 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1022 int id;
1023 #endif
1024
1025 if (phba->pport->load_flag & FC_UNLOADING) {
1026 rc = -ENODEV;
1027 goto aerr;
1028 }
1029
1030 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1031 if (ctxp->ts_cmd_nvme) {
1032 if (rsp->op == NVMET_FCOP_RSP)
1033 ctxp->ts_nvme_status = ktime_get_ns();
1034 else
1035 ctxp->ts_nvme_data = ktime_get_ns();
1036 }
1037
1038
1039 if (!ctxp->hdwq)
1040 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1041
1042 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1043 id = raw_smp_processor_id();
1044 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1045 if (rsp->hwqid != id)
1046 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1047 "6705 CPU Check OP: "
1048 "cpu %d expect %d\n",
1049 id, rsp->hwqid);
1050 ctxp->cpu = id;
1051 }
1052 #endif
1053
1054
1055 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1056 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1057 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1059 "6102 IO oxid x%x aborted\n",
1060 ctxp->oxid);
1061 rc = -ENXIO;
1062 goto aerr;
1063 }
1064
1065 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1066 if (nvmewqeq == NULL) {
1067 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1069 "6152 FCP Drop IO x%x: Prep\n",
1070 ctxp->oxid);
1071 rc = -ENXIO;
1072 goto aerr;
1073 }
1074
1075 nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1076 nvmewqeq->context_un.axchg = ctxp;
1077 nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
1078 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1079
1080 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1081 ctxp->oxid, rsp->op, rsp->rsplen);
1082
1083 ctxp->flag |= LPFC_NVME_IO_INP;
1084 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1085 if (rc == WQE_SUCCESS) {
1086 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1087 if (!ctxp->ts_cmd_nvme)
1088 return 0;
1089 if (rsp->op == NVMET_FCOP_RSP)
1090 ctxp->ts_status_wqput = ktime_get_ns();
1091 else
1092 ctxp->ts_data_wqput = ktime_get_ns();
1093 #endif
1094 return 0;
1095 }
1096
1097 if (rc == -EBUSY) {
1098
1099
1100
1101
1102 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1103 wq = ctxp->hdwq->io_wq;
1104 pring = wq->pring;
1105 spin_lock_irqsave(&pring->ring_lock, iflags);
1106 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1107 wq->q_flag |= HBA_NVMET_WQFULL;
1108 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1109 atomic_inc(&lpfc_nvmep->defer_wqfull);
1110 return 0;
1111 }
1112
1113
1114 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1116 "6153 FCP Drop IO x%x: Issue: %d\n",
1117 ctxp->oxid, rc);
1118
1119 ctxp->wqeq->hba_wqidx = 0;
1120 nvmewqeq->context_un.axchg = NULL;
1121 nvmewqeq->bpl_dmabuf = NULL;
1122 rc = -EBUSY;
1123 aerr:
1124 return rc;
1125 }
1126
1127 static void
1128 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1129 {
1130 struct lpfc_nvmet_tgtport *tport = targetport->private;
1131
1132
1133 if (tport->phba->targetport)
1134 complete(tport->tport_unreg_cmp);
1135 }
1136
1137 static void
1138 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1139 struct nvmefc_tgt_fcp_req *req)
1140 {
1141 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1142 struct lpfc_async_xchg_ctx *ctxp =
1143 container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1144 struct lpfc_hba *phba = ctxp->phba;
1145 struct lpfc_queue *wq;
1146 unsigned long flags;
1147
1148 if (phba->pport->load_flag & FC_UNLOADING)
1149 return;
1150
1151 if (!ctxp->hdwq)
1152 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1153
1154 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1155 "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1156 ctxp->oxid, ctxp->flag, ctxp->state);
1157
1158 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1159 ctxp->oxid, ctxp->flag, ctxp->state);
1160
1161 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1162
1163 spin_lock_irqsave(&ctxp->ctxlock, flags);
1164
1165
1166
1167
1168 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1169 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1170 return;
1171 }
1172 ctxp->flag |= LPFC_NVME_ABORT_OP;
1173
1174 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1175 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1176 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1177 ctxp->oxid);
1178 wq = ctxp->hdwq->io_wq;
1179 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1180 return;
1181 }
1182 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1183
1184
1185
1186
1187
1188 if (ctxp->state == LPFC_NVME_STE_RCV)
1189 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1190 ctxp->oxid);
1191 else
1192 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1193 ctxp->oxid);
1194 }
1195
1196 static void
1197 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1198 struct nvmefc_tgt_fcp_req *rsp)
1199 {
1200 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1201 struct lpfc_async_xchg_ctx *ctxp =
1202 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1203 struct lpfc_hba *phba = ctxp->phba;
1204 unsigned long flags;
1205 bool aborting = false;
1206
1207 spin_lock_irqsave(&ctxp->ctxlock, flags);
1208 if (ctxp->flag & LPFC_NVME_XBUSY)
1209 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1210 "6027 NVMET release with XBUSY flag x%x"
1211 " oxid x%x\n",
1212 ctxp->flag, ctxp->oxid);
1213 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1214 ctxp->state != LPFC_NVME_STE_ABORT)
1215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1216 "6413 NVMET release bad state %d %d oxid x%x\n",
1217 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1218
1219 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1220 (ctxp->flag & LPFC_NVME_XBUSY)) {
1221 aborting = true;
1222
1223 lpfc_nvmet_defer_release(phba, ctxp);
1224 }
1225 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1226
1227 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1228 ctxp->state, aborting);
1229
1230 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1231 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1232
1233 if (aborting)
1234 return;
1235
1236 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1237 }
1238
1239 static void
1240 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1241 struct nvmefc_tgt_fcp_req *rsp)
1242 {
1243 struct lpfc_nvmet_tgtport *tgtp;
1244 struct lpfc_async_xchg_ctx *ctxp =
1245 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1246 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1247 struct lpfc_hba *phba = ctxp->phba;
1248 unsigned long iflag;
1249
1250
1251 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1252 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1253
1254 if (!nvmebuf) {
1255 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1256 "6425 Defer rcv: no buffer oxid x%x: "
1257 "flg %x ste %x\n",
1258 ctxp->oxid, ctxp->flag, ctxp->state);
1259 return;
1260 }
1261
1262 tgtp = phba->targetport->private;
1263 if (tgtp)
1264 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1265
1266
1267 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1268 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1269 ctxp->rqb_buffer = NULL;
1270 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1271 }
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283 static void
1284 lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1285 struct lpfc_iocbq *rspwqe)
1286 {
1287 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
1288 __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1289 }
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305 static int
1306 lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1307 void *hosthandle,
1308 struct nvmefc_ls_req *pnvme_lsreq)
1309 {
1310 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1311 struct lpfc_hba *phba;
1312 struct lpfc_nodelist *ndlp;
1313 int ret;
1314 u32 hstate;
1315
1316 if (!lpfc_nvmet)
1317 return -EINVAL;
1318
1319 phba = lpfc_nvmet->phba;
1320 if (phba->pport->load_flag & FC_UNLOADING)
1321 return -EINVAL;
1322
1323 hstate = atomic_read(&lpfc_nvmet->state);
1324 if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1325 return -EACCES;
1326
1327 ndlp = (struct lpfc_nodelist *)hosthandle;
1328
1329 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1330 lpfc_nvmet_ls_req_cmp);
1331
1332 return ret;
1333 }
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345 static void
1346 lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1347 void *hosthandle,
1348 struct nvmefc_ls_req *pnvme_lsreq)
1349 {
1350 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1351 struct lpfc_hba *phba;
1352 struct lpfc_nodelist *ndlp;
1353 int ret;
1354
1355 phba = lpfc_nvmet->phba;
1356 if (phba->pport->load_flag & FC_UNLOADING)
1357 return;
1358
1359 ndlp = (struct lpfc_nodelist *)hosthandle;
1360
1361 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1362 if (!ret)
1363 atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1364 }
1365
1366 static void
1367 lpfc_nvmet_host_release(void *hosthandle)
1368 {
1369 struct lpfc_nodelist *ndlp = hosthandle;
1370 struct lpfc_hba *phba = ndlp->phba;
1371 struct lpfc_nvmet_tgtport *tgtp;
1372
1373 if (!phba->targetport || !phba->targetport->private)
1374 return;
1375
1376 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1377 "6202 NVMET XPT releasing hosthandle x%px "
1378 "DID x%x xflags x%x refcnt %d\n",
1379 hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
1380 kref_read(&ndlp->kref));
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 spin_lock_irq(&ndlp->lock);
1383 ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
1384 spin_unlock_irq(&ndlp->lock);
1385 lpfc_nlp_put(ndlp);
1386 atomic_set(&tgtp->state, 0);
1387 }
1388
1389 static void
1390 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1391 {
1392 struct lpfc_nvmet_tgtport *tgtp;
1393 struct lpfc_hba *phba;
1394 uint32_t rc;
1395
1396 tgtp = tgtport->private;
1397 phba = tgtp->phba;
1398
1399 rc = lpfc_issue_els_rscn(phba->pport, 0);
1400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1401 "6420 NVMET subsystem change: Notification %s\n",
1402 (rc) ? "Failed" : "Sent");
1403 }
1404
1405 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1406 .targetport_delete = lpfc_nvmet_targetport_delete,
1407 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1408 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1409 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1410 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1411 .defer_rcv = lpfc_nvmet_defer_rcv,
1412 .discovery_event = lpfc_nvmet_discovery_event,
1413 .ls_req = lpfc_nvmet_ls_req,
1414 .ls_abort = lpfc_nvmet_ls_abort,
1415 .host_release = lpfc_nvmet_host_release,
1416
1417 .max_hw_queues = 1,
1418 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1419 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1420 .dma_boundary = 0xFFFFFFFF,
1421
1422
1423 .target_features = 0,
1424
1425 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1426 .lsrqst_priv_sz = 0,
1427 };
1428
1429 static void
1430 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1431 struct lpfc_nvmet_ctx_info *infop)
1432 {
1433 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1434 unsigned long flags;
1435
1436 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1437 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1438 &infop->nvmet_ctx_list, list) {
1439 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1440 list_del_init(&ctx_buf->list);
1441 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1442
1443 spin_lock(&phba->hbalock);
1444 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1445 spin_unlock(&phba->hbalock);
1446
1447 ctx_buf->sglq->state = SGL_FREED;
1448 ctx_buf->sglq->ndlp = NULL;
1449
1450 spin_lock(&phba->sli4_hba.sgl_list_lock);
1451 list_add_tail(&ctx_buf->sglq->list,
1452 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1453 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1454
1455 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1456 kfree(ctx_buf->context);
1457 }
1458 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1459 }
1460
1461 static void
1462 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1463 {
1464 struct lpfc_nvmet_ctx_info *infop;
1465 int i, j;
1466
1467
1468 infop = phba->sli4_hba.nvmet_ctx_info;
1469 if (!infop)
1470 return;
1471
1472
1473 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1474 for_each_present_cpu(j) {
1475 infop = lpfc_get_ctx_list(phba, j, i);
1476 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1477 }
1478 }
1479 kfree(phba->sli4_hba.nvmet_ctx_info);
1480 phba->sli4_hba.nvmet_ctx_info = NULL;
1481 }
1482
1483 static int
1484 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1485 {
1486 struct lpfc_nvmet_ctxbuf *ctx_buf;
1487 struct lpfc_iocbq *nvmewqe;
1488 union lpfc_wqe128 *wqe;
1489 struct lpfc_nvmet_ctx_info *last_infop;
1490 struct lpfc_nvmet_ctx_info *infop;
1491 int i, j, idx, cpu;
1492
1493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1494 "6403 Allocate NVMET resources for %d XRIs\n",
1495 phba->sli4_hba.nvmet_xri_cnt);
1496
1497 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1498 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1499 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1500 if (!phba->sli4_hba.nvmet_ctx_info) {
1501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1502 "6419 Failed allocate memory for "
1503 "nvmet context lists\n");
1504 return -ENOMEM;
1505 }
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526 for_each_possible_cpu(i) {
1527 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1528 infop = lpfc_get_ctx_list(phba, i, j);
1529 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1530 spin_lock_init(&infop->nvmet_ctx_list_lock);
1531 infop->nvmet_ctx_list_cnt = 0;
1532 }
1533 }
1534
1535
1536
1537
1538
1539
1540 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1541 last_infop = lpfc_get_ctx_list(phba,
1542 cpumask_first(cpu_present_mask),
1543 j);
1544 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
1545 infop = lpfc_get_ctx_list(phba, i, j);
1546 infop->nvmet_ctx_next_cpu = last_infop;
1547 last_infop = infop;
1548 }
1549 }
1550
1551
1552
1553
1554 idx = 0;
1555 cpu = cpumask_first(cpu_present_mask);
1556 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1557 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1558 if (!ctx_buf) {
1559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1560 "6404 Ran out of memory for NVMET\n");
1561 return -ENOMEM;
1562 }
1563
1564 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1565 GFP_KERNEL);
1566 if (!ctx_buf->context) {
1567 kfree(ctx_buf);
1568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1569 "6405 Ran out of NVMET "
1570 "context memory\n");
1571 return -ENOMEM;
1572 }
1573 ctx_buf->context->ctxbuf = ctx_buf;
1574 ctx_buf->context->state = LPFC_NVME_STE_FREE;
1575
1576 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1577 if (!ctx_buf->iocbq) {
1578 kfree(ctx_buf->context);
1579 kfree(ctx_buf);
1580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1581 "6406 Ran out of NVMET iocb/WQEs\n");
1582 return -ENOMEM;
1583 }
1584 ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
1585 nvmewqe = ctx_buf->iocbq;
1586 wqe = &nvmewqe->wqe;
1587
1588
1589 memset(wqe, 0, sizeof(union lpfc_wqe));
1590
1591 ctx_buf->iocbq->cmd_dmabuf = NULL;
1592 spin_lock(&phba->sli4_hba.sgl_list_lock);
1593 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1594 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1595 if (!ctx_buf->sglq) {
1596 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1597 kfree(ctx_buf->context);
1598 kfree(ctx_buf);
1599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1600 "6407 Ran out of NVMET XRIs\n");
1601 return -ENOMEM;
1602 }
1603 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1604
1605
1606
1607
1608
1609
1610 infop = lpfc_get_ctx_list(phba, cpu, idx);
1611 spin_lock(&infop->nvmet_ctx_list_lock);
1612 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1613 infop->nvmet_ctx_list_cnt++;
1614 spin_unlock(&infop->nvmet_ctx_list_lock);
1615
1616
1617 idx++;
1618 if (idx >= phba->cfg_nvmet_mrq) {
1619 idx = 0;
1620 cpu = cpumask_first(cpu_present_mask);
1621 continue;
1622 }
1623 cpu = cpumask_next(cpu, cpu_present_mask);
1624 if (cpu == nr_cpu_ids)
1625 cpu = cpumask_first(cpu_present_mask);
1626
1627 }
1628
1629 for_each_present_cpu(i) {
1630 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1631 infop = lpfc_get_ctx_list(phba, i, j);
1632 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1633 "6408 TOTAL NVMET ctx for CPU %d "
1634 "MRQ %d: cnt %d nextcpu x%px\n",
1635 i, j, infop->nvmet_ctx_list_cnt,
1636 infop->nvmet_ctx_next_cpu);
1637 }
1638 }
1639 return 0;
1640 }
1641
1642 int
1643 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1644 {
1645 struct lpfc_vport *vport = phba->pport;
1646 struct lpfc_nvmet_tgtport *tgtp;
1647 struct nvmet_fc_port_info pinfo;
1648 int error;
1649
1650 if (phba->targetport)
1651 return 0;
1652
1653 error = lpfc_nvmet_setup_io_context(phba);
1654 if (error)
1655 return error;
1656
1657 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1658 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1659 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1660 pinfo.port_id = vport->fc_myDID;
1661
1662
1663
1664
1665
1666 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1667 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1668 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1669
1670 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1671 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1672 &phba->pcidev->dev,
1673 &phba->targetport);
1674 #else
1675 error = -ENOENT;
1676 #endif
1677 if (error) {
1678 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1679 "6025 Cannot register NVME targetport x%x: "
1680 "portnm %llx nodenm %llx segs %d qs %d\n",
1681 error,
1682 pinfo.port_name, pinfo.node_name,
1683 lpfc_tgttemplate.max_sgl_segments,
1684 lpfc_tgttemplate.max_hw_queues);
1685 phba->targetport = NULL;
1686 phba->nvmet_support = 0;
1687
1688 lpfc_nvmet_cleanup_io_context(phba);
1689
1690 } else {
1691 tgtp = (struct lpfc_nvmet_tgtport *)
1692 phba->targetport->private;
1693 tgtp->phba = phba;
1694
1695 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1696 "6026 Registered NVME "
1697 "targetport: x%px, private x%px "
1698 "portnm %llx nodenm %llx segs %d qs %d\n",
1699 phba->targetport, tgtp,
1700 pinfo.port_name, pinfo.node_name,
1701 lpfc_tgttemplate.max_sgl_segments,
1702 lpfc_tgttemplate.max_hw_queues);
1703
1704 atomic_set(&tgtp->rcv_ls_req_in, 0);
1705 atomic_set(&tgtp->rcv_ls_req_out, 0);
1706 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1707 atomic_set(&tgtp->xmt_ls_abort, 0);
1708 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1709 atomic_set(&tgtp->xmt_ls_rsp, 0);
1710 atomic_set(&tgtp->xmt_ls_drop, 0);
1711 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1712 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1713 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1714 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1715 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1716 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1717 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1718 atomic_set(&tgtp->xmt_fcp_drop, 0);
1719 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1720 atomic_set(&tgtp->xmt_fcp_read, 0);
1721 atomic_set(&tgtp->xmt_fcp_write, 0);
1722 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1723 atomic_set(&tgtp->xmt_fcp_release, 0);
1724 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1725 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1726 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1727 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1728 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1729 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1730 atomic_set(&tgtp->xmt_fcp_abort, 0);
1731 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1732 atomic_set(&tgtp->xmt_abort_unsol, 0);
1733 atomic_set(&tgtp->xmt_abort_sol, 0);
1734 atomic_set(&tgtp->xmt_abort_rsp, 0);
1735 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1736 atomic_set(&tgtp->defer_ctx, 0);
1737 atomic_set(&tgtp->defer_fod, 0);
1738 atomic_set(&tgtp->defer_wqfull, 0);
1739 }
1740 return error;
1741 }
1742
1743 int
1744 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1745 {
1746 struct lpfc_vport *vport = phba->pport;
1747
1748 if (!phba->targetport)
1749 return 0;
1750
1751 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1752 "6007 Update NVMET port x%px did x%x\n",
1753 phba->targetport, vport->fc_myDID);
1754
1755 phba->targetport->port_id = vport->fc_myDID;
1756 return 0;
1757 }
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767 void
1768 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1769 struct sli4_wcqe_xri_aborted *axri)
1770 {
1771 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1772 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1773 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1774 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1775 struct lpfc_nvmet_tgtport *tgtp;
1776 struct nvmefc_tgt_fcp_req *req = NULL;
1777 struct lpfc_nodelist *ndlp;
1778 unsigned long iflag = 0;
1779 int rrq_empty = 0;
1780 bool released = false;
1781
1782 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1783 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1784
1785 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1786 return;
1787
1788 if (phba->targetport) {
1789 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1790 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1791 }
1792
1793 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1794 list_for_each_entry_safe(ctxp, next_ctxp,
1795 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1796 list) {
1797 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1798 continue;
1799
1800 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1801 iflag);
1802
1803 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1804
1805
1806
1807 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1808 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1809 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1810 list_del_init(&ctxp->list);
1811 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1812 released = true;
1813 }
1814 ctxp->flag &= ~LPFC_NVME_XBUSY;
1815 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1816
1817 rrq_empty = list_empty(&phba->active_rrq_list);
1818 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1819 if (ndlp &&
1820 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1821 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1822 lpfc_set_rrq_active(phba, ndlp,
1823 ctxp->ctxbuf->sglq->sli4_lxritag,
1824 rxid, 1);
1825 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1826 }
1827
1828 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1829 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1830 ctxp->oxid, ctxp->flag, released);
1831 if (released)
1832 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1833
1834 if (rrq_empty)
1835 lpfc_worker_wake_up(phba);
1836 return;
1837 }
1838 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1839 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1840 if (ctxp) {
1841
1842
1843
1844
1845 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1846 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1847 "flag x%x oxid x%x rxid x%x\n",
1848 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1849 rxid);
1850
1851 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1852 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1853 ctxp->state = LPFC_NVME_STE_ABORT;
1854 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1855
1856 lpfc_nvmeio_data(phba,
1857 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1858 xri, raw_smp_processor_id(), 0);
1859
1860 req = &ctxp->hdlrctx.fcp_req;
1861 if (req)
1862 nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1863 }
1864 #endif
1865 }
1866
1867 int
1868 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1869 struct fc_frame_header *fc_hdr)
1870 {
1871 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1872 struct lpfc_hba *phba = vport->phba;
1873 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1874 struct nvmefc_tgt_fcp_req *rsp;
1875 uint32_t sid;
1876 uint16_t oxid, xri;
1877 unsigned long iflag = 0;
1878
1879 sid = sli4_sid_from_fc_hdr(fc_hdr);
1880 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1881
1882 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1883 list_for_each_entry_safe(ctxp, next_ctxp,
1884 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1885 list) {
1886 if (ctxp->oxid != oxid || ctxp->sid != sid)
1887 continue;
1888
1889 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1890
1891 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1892 iflag);
1893 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1894 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1895 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1896
1897 lpfc_nvmeio_data(phba,
1898 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1899 xri, raw_smp_processor_id(), 0);
1900
1901 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1902 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1903
1904 rsp = &ctxp->hdlrctx.fcp_req;
1905 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1906
1907
1908 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1909 return 0;
1910 }
1911 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1912
1913 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1914 struct rqb_dmabuf *nvmebuf;
1915 struct fc_frame_header *fc_hdr_tmp;
1916 u32 sid_tmp;
1917 u16 oxid_tmp;
1918 bool found = false;
1919
1920 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1921
1922
1923 list_for_each_entry(nvmebuf,
1924 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1925 hbuf.list) {
1926 fc_hdr_tmp = (struct fc_frame_header *)
1927 (nvmebuf->hbuf.virt);
1928 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1929 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1930 if (oxid_tmp != oxid || sid_tmp != sid)
1931 continue;
1932
1933 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1934 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1935 "is waiting for a ctxp\n",
1936 oxid, sid);
1937
1938 list_del_init(&nvmebuf->hbuf.list);
1939 phba->sli4_hba.nvmet_io_wait_cnt--;
1940 found = true;
1941 break;
1942 }
1943 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1944 iflag);
1945
1946
1947 if (found) {
1948 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1949
1950 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1951 return 0;
1952 }
1953 }
1954
1955
1956 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1957 if (ctxp) {
1958 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1959
1960 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1961 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1962 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1963
1964 lpfc_nvmeio_data(phba,
1965 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1966 xri, raw_smp_processor_id(), 0);
1967
1968 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1969 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1970 "flag x%x state x%x\n",
1971 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1972
1973 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1974
1975 nvmet_fc_rcv_fcp_abort(phba->targetport,
1976 &ctxp->hdlrctx.fcp_req);
1977 } else {
1978 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1979 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1980 lpfc_nvmet_defer_release(phba, ctxp);
1981 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1982 }
1983 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1984 ctxp->oxid);
1985
1986 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1987 return 0;
1988 }
1989
1990 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1991 oxid, raw_smp_processor_id(), 1);
1992
1993 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1994 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1995
1996
1997 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1998 #endif
1999 return 0;
2000 }
2001
2002 static void
2003 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2004 struct lpfc_async_xchg_ctx *ctxp)
2005 {
2006 struct lpfc_sli_ring *pring;
2007 struct lpfc_iocbq *nvmewqeq;
2008 struct lpfc_iocbq *next_nvmewqeq;
2009 unsigned long iflags;
2010 struct lpfc_wcqe_complete wcqe;
2011 struct lpfc_wcqe_complete *wcqep;
2012
2013 pring = wq->pring;
2014 wcqep = &wcqe;
2015
2016
2017 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2018 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2019 wcqep->parameter = IOERR_ABORT_REQUESTED;
2020
2021 spin_lock_irqsave(&pring->ring_lock, iflags);
2022 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2023 &wq->wqfull_list, list) {
2024 if (ctxp) {
2025
2026 if (nvmewqeq->context_un.axchg == ctxp) {
2027 list_del(&nvmewqeq->list);
2028 spin_unlock_irqrestore(&pring->ring_lock,
2029 iflags);
2030 memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
2031 sizeof(*wcqep));
2032 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2033 nvmewqeq);
2034 return;
2035 }
2036 continue;
2037 } else {
2038
2039 list_del(&nvmewqeq->list);
2040 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2041 memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
2042 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
2043 spin_lock_irqsave(&pring->ring_lock, iflags);
2044 }
2045 }
2046 if (!ctxp)
2047 wq->q_flag &= ~HBA_NVMET_WQFULL;
2048 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2049 }
2050
2051 void
2052 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2053 struct lpfc_queue *wq)
2054 {
2055 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2056 struct lpfc_sli_ring *pring;
2057 struct lpfc_iocbq *nvmewqeq;
2058 struct lpfc_async_xchg_ctx *ctxp;
2059 unsigned long iflags;
2060 int rc;
2061
2062
2063
2064
2065
2066 pring = wq->pring;
2067 spin_lock_irqsave(&pring->ring_lock, iflags);
2068 while (!list_empty(&wq->wqfull_list)) {
2069 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2070 list);
2071 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2072 ctxp = nvmewqeq->context_un.axchg;
2073 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2074 spin_lock_irqsave(&pring->ring_lock, iflags);
2075 if (rc == -EBUSY) {
2076
2077 list_add(&nvmewqeq->list, &wq->wqfull_list);
2078 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2079 return;
2080 }
2081 if (rc == WQE_SUCCESS) {
2082 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2083 if (ctxp->ts_cmd_nvme) {
2084 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2085 ctxp->ts_status_wqput = ktime_get_ns();
2086 else
2087 ctxp->ts_data_wqput = ktime_get_ns();
2088 }
2089 #endif
2090 } else {
2091 WARN_ON(rc);
2092 }
2093 }
2094 wq->q_flag &= ~HBA_NVMET_WQFULL;
2095 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2096
2097 #endif
2098 }
2099
2100 void
2101 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2102 {
2103 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2104 struct lpfc_nvmet_tgtport *tgtp;
2105 struct lpfc_queue *wq;
2106 uint32_t qidx;
2107 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2108
2109 if (phba->nvmet_support == 0)
2110 return;
2111 if (phba->targetport) {
2112 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2113 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2114 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2115 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2116 }
2117 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2118 nvmet_fc_unregister_targetport(phba->targetport);
2119 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2120 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2122 "6179 Unreg targetport x%px timeout "
2123 "reached.\n", phba->targetport);
2124 lpfc_nvmet_cleanup_io_context(phba);
2125 }
2126 phba->targetport = NULL;
2127 #endif
2128 }
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146 int
2147 lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2148 struct lpfc_async_xchg_ctx *axchg)
2149 {
2150 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2151 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2152 uint32_t *payload = axchg->payload;
2153 int rc;
2154
2155 atomic_inc(&tgtp->rcv_ls_req_in);
2156
2157
2158
2159
2160
2161
2162 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2163 axchg->payload, axchg->size);
2164
2165 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2166 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2167 "%08x %08x %08x\n", axchg->size, rc,
2168 *payload, *(payload+1), *(payload+2),
2169 *(payload+3), *(payload+4), *(payload+5));
2170
2171 if (!rc) {
2172 atomic_inc(&tgtp->rcv_ls_req_out);
2173 return 0;
2174 }
2175
2176 atomic_inc(&tgtp->rcv_ls_req_drop);
2177 #endif
2178 return 1;
2179 }
2180
2181 static void
2182 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2183 {
2184 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2185 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2186 struct lpfc_hba *phba = ctxp->phba;
2187 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2188 struct lpfc_nvmet_tgtport *tgtp;
2189 uint32_t *payload, qno;
2190 uint32_t rc;
2191 unsigned long iflags;
2192
2193 if (!nvmebuf) {
2194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2195 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2196 "oxid: x%x flg: x%x state: x%x\n",
2197 ctxp->oxid, ctxp->flag, ctxp->state);
2198 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2199 lpfc_nvmet_defer_release(phba, ctxp);
2200 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2201 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2202 ctxp->oxid);
2203 return;
2204 }
2205
2206 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2208 "6324 IO oxid x%x aborted\n",
2209 ctxp->oxid);
2210 return;
2211 }
2212
2213 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2214 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2215 ctxp->flag |= LPFC_NVME_TNOTIFY;
2216 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2217 if (ctxp->ts_isr_cmd)
2218 ctxp->ts_cmd_nvme = ktime_get_ns();
2219 #endif
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2230 payload, ctxp->size);
2231
2232 if (rc == 0) {
2233 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2234 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2235 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2236 (nvmebuf != ctxp->rqb_buffer)) {
2237 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2238 return;
2239 }
2240 ctxp->rqb_buffer = NULL;
2241 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2242 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2243 return;
2244 }
2245
2246
2247 if (rc == -EOVERFLOW) {
2248 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2249 "from %06x\n",
2250 ctxp->oxid, ctxp->size, ctxp->sid);
2251 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2252 atomic_inc(&tgtp->defer_fod);
2253 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2254 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2255 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2256 return;
2257 }
2258 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2259
2260
2261
2262
2263 qno = nvmebuf->idx;
2264 lpfc_post_rq_buffer(
2265 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2266 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2267 return;
2268 }
2269 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2270 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2272 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2273 ctxp->oxid, rc,
2274 atomic_read(&tgtp->rcv_fcp_cmd_in),
2275 atomic_read(&tgtp->rcv_fcp_cmd_out),
2276 atomic_read(&tgtp->xmt_fcp_release));
2277 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2278 ctxp->oxid, ctxp->size, ctxp->sid);
2279 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2280 lpfc_nvmet_defer_release(phba, ctxp);
2281 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2282 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2283 #endif
2284 }
2285
2286 static void
2287 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2288 {
2289 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2290 struct lpfc_nvmet_ctxbuf *ctx_buf =
2291 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2292
2293 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2294 #endif
2295 }
2296
2297 static struct lpfc_nvmet_ctxbuf *
2298 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2299 struct lpfc_nvmet_ctx_info *current_infop)
2300 {
2301 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2302 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2303 struct lpfc_nvmet_ctx_info *get_infop;
2304 int i;
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316 if (current_infop->nvmet_ctx_start_cpu)
2317 get_infop = current_infop->nvmet_ctx_start_cpu;
2318 else
2319 get_infop = current_infop->nvmet_ctx_next_cpu;
2320
2321 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2322 if (get_infop == current_infop) {
2323 get_infop = get_infop->nvmet_ctx_next_cpu;
2324 continue;
2325 }
2326 spin_lock(&get_infop->nvmet_ctx_list_lock);
2327
2328
2329 if (get_infop->nvmet_ctx_list_cnt) {
2330 list_splice_init(&get_infop->nvmet_ctx_list,
2331 ¤t_infop->nvmet_ctx_list);
2332 current_infop->nvmet_ctx_list_cnt =
2333 get_infop->nvmet_ctx_list_cnt - 1;
2334 get_infop->nvmet_ctx_list_cnt = 0;
2335 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2336
2337 current_infop->nvmet_ctx_start_cpu = get_infop;
2338 list_remove_head(¤t_infop->nvmet_ctx_list,
2339 ctx_buf, struct lpfc_nvmet_ctxbuf,
2340 list);
2341 return ctx_buf;
2342 }
2343
2344
2345 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2346 get_infop = get_infop->nvmet_ctx_next_cpu;
2347 }
2348
2349 #endif
2350
2351 return NULL;
2352 }
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369 static void
2370 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2371 uint32_t idx,
2372 struct rqb_dmabuf *nvmebuf,
2373 uint64_t isr_timestamp,
2374 uint8_t cqflag)
2375 {
2376 struct lpfc_async_xchg_ctx *ctxp;
2377 struct lpfc_nvmet_tgtport *tgtp;
2378 struct fc_frame_header *fc_hdr;
2379 struct lpfc_nvmet_ctxbuf *ctx_buf;
2380 struct lpfc_nvmet_ctx_info *current_infop;
2381 uint32_t size, oxid, sid, qno;
2382 unsigned long iflag;
2383 int current_cpu;
2384
2385 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2386 return;
2387
2388 ctx_buf = NULL;
2389 if (!nvmebuf || !phba->targetport) {
2390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2391 "6157 NVMET FCP Drop IO\n");
2392 if (nvmebuf)
2393 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2394 return;
2395 }
2396
2397
2398
2399
2400
2401
2402
2403
2404 current_cpu = raw_smp_processor_id();
2405 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2406 spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag);
2407 if (current_infop->nvmet_ctx_list_cnt) {
2408 list_remove_head(¤t_infop->nvmet_ctx_list,
2409 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2410 current_infop->nvmet_ctx_list_cnt--;
2411 } else {
2412 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2413 }
2414 spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag);
2415
2416 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2417 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2418 size = nvmebuf->bytes_recv;
2419
2420 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2421 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2422 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2423 if (idx != current_cpu)
2424 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2425 "6703 CPU Check rcv: "
2426 "cpu %d expect %d\n",
2427 current_cpu, idx);
2428 }
2429 #endif
2430
2431 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2432 oxid, size, raw_smp_processor_id());
2433
2434 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2435
2436 if (!ctx_buf) {
2437
2438 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2439 list_add_tail(&nvmebuf->hbuf.list,
2440 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2441 phba->sli4_hba.nvmet_io_wait_cnt++;
2442 phba->sli4_hba.nvmet_io_wait_total++;
2443 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2444 iflag);
2445
2446
2447 qno = nvmebuf->idx;
2448 lpfc_post_rq_buffer(
2449 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2450 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2451
2452 atomic_inc(&tgtp->defer_ctx);
2453 return;
2454 }
2455
2456 sid = sli4_sid_from_fc_hdr(fc_hdr);
2457
2458 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2459 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2460 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2461 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2462 if (ctxp->state != LPFC_NVME_STE_FREE) {
2463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2464 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2465 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2466 }
2467 ctxp->wqeq = NULL;
2468 ctxp->offset = 0;
2469 ctxp->phba = phba;
2470 ctxp->size = size;
2471 ctxp->oxid = oxid;
2472 ctxp->sid = sid;
2473 ctxp->idx = idx;
2474 ctxp->state = LPFC_NVME_STE_RCV;
2475 ctxp->entry_cnt = 1;
2476 ctxp->flag = 0;
2477 ctxp->ctxbuf = ctx_buf;
2478 ctxp->rqb_buffer = (void *)nvmebuf;
2479 ctxp->hdwq = NULL;
2480 spin_lock_init(&ctxp->ctxlock);
2481
2482 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2483 if (isr_timestamp)
2484 ctxp->ts_isr_cmd = isr_timestamp;
2485 ctxp->ts_cmd_nvme = 0;
2486 ctxp->ts_nvme_data = 0;
2487 ctxp->ts_data_wqput = 0;
2488 ctxp->ts_isr_data = 0;
2489 ctxp->ts_data_nvme = 0;
2490 ctxp->ts_nvme_status = 0;
2491 ctxp->ts_status_wqput = 0;
2492 ctxp->ts_isr_status = 0;
2493 ctxp->ts_status_nvme = 0;
2494 #endif
2495
2496 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2497
2498 if (!cqflag) {
2499 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2500 return;
2501 }
2502
2503 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2504 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2505 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2506 "6325 Unable to queue work for oxid x%x. "
2507 "FCP Drop IO [x%x x%x x%x]\n",
2508 ctxp->oxid,
2509 atomic_read(&tgtp->rcv_fcp_cmd_in),
2510 atomic_read(&tgtp->rcv_fcp_cmd_out),
2511 atomic_read(&tgtp->xmt_fcp_release));
2512
2513 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2514 lpfc_nvmet_defer_release(phba, ctxp);
2515 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2516 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2517 }
2518 }
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534 void
2535 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2536 uint32_t idx,
2537 struct rqb_dmabuf *nvmebuf,
2538 uint64_t isr_timestamp,
2539 uint8_t cqflag)
2540 {
2541 if (!nvmebuf) {
2542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2543 "3167 NVMET FCP Drop IO\n");
2544 return;
2545 }
2546 if (phba->nvmet_support == 0) {
2547 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2548 return;
2549 }
2550 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2551 }
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578 static struct lpfc_iocbq *
2579 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2580 struct lpfc_async_xchg_ctx *ctxp,
2581 dma_addr_t rspbuf, uint16_t rspsize)
2582 {
2583 struct lpfc_nodelist *ndlp;
2584 struct lpfc_iocbq *nvmewqe;
2585 union lpfc_wqe128 *wqe;
2586
2587 if (!lpfc_is_link_up(phba)) {
2588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2589 "6104 NVMET prep LS wqe: link err: "
2590 "NPORT x%x oxid:x%x ste %d\n",
2591 ctxp->sid, ctxp->oxid, ctxp->state);
2592 return NULL;
2593 }
2594
2595
2596 nvmewqe = lpfc_sli_get_iocbq(phba);
2597 if (nvmewqe == NULL) {
2598 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2599 "6105 NVMET prep LS wqe: No WQE: "
2600 "NPORT x%x oxid x%x ste %d\n",
2601 ctxp->sid, ctxp->oxid, ctxp->state);
2602 return NULL;
2603 }
2604
2605 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2606 if (!ndlp ||
2607 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2608 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2610 "6106 NVMET prep LS wqe: No ndlp: "
2611 "NPORT x%x oxid x%x ste %d\n",
2612 ctxp->sid, ctxp->oxid, ctxp->state);
2613 goto nvme_wqe_free_wqeq_exit;
2614 }
2615 ctxp->wqeq = nvmewqe;
2616
2617
2618 nvmewqe->ndlp = lpfc_nlp_get(ndlp);
2619 if (!nvmewqe->ndlp)
2620 goto nvme_wqe_free_wqeq_exit;
2621 nvmewqe->context_un.axchg = ctxp;
2622
2623 wqe = &nvmewqe->wqe;
2624 memset(wqe, 0, sizeof(union lpfc_wqe));
2625
2626
2627 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2628 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2629 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2630 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2631
2632
2633
2634
2635
2636
2637 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2638 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2639 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2640 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2641 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2642
2643
2644 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2645 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2646 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2647
2648
2649 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2650 CMD_XMIT_SEQUENCE64_WQE);
2651 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2652 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2653 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2654
2655
2656 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2657
2658
2659 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2660
2661 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2662
2663
2664 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2665 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2666 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2667 LPFC_WQE_LENLOC_WORD12);
2668 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2669
2670
2671 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2672 LPFC_WQE_CQ_ID_DEFAULT);
2673 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2674 OTHER_COMMAND);
2675
2676
2677 wqe->xmit_sequence.xmit_len = rspsize;
2678
2679 nvmewqe->retry = 1;
2680 nvmewqe->vport = phba->pport;
2681 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2682 nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
2683
2684
2685 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2686 "6039 Xmit NVMET LS response to remote "
2687 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2688 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2689 rspsize);
2690 return nvmewqe;
2691
2692 nvme_wqe_free_wqeq_exit:
2693 nvmewqe->context_un.axchg = NULL;
2694 nvmewqe->ndlp = NULL;
2695 nvmewqe->bpl_dmabuf = NULL;
2696 lpfc_sli_release_iocbq(phba, nvmewqe);
2697 return NULL;
2698 }
2699
2700
2701 static struct lpfc_iocbq *
2702 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2703 struct lpfc_async_xchg_ctx *ctxp)
2704 {
2705 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2706 struct lpfc_nvmet_tgtport *tgtp;
2707 struct sli4_sge *sgl;
2708 struct lpfc_nodelist *ndlp;
2709 struct lpfc_iocbq *nvmewqe;
2710 struct scatterlist *sgel;
2711 union lpfc_wqe128 *wqe;
2712 struct ulp_bde64 *bde;
2713 dma_addr_t physaddr;
2714 int i, cnt, nsegs;
2715 bool use_pbde = false;
2716 int xc = 1;
2717
2718 if (!lpfc_is_link_up(phba)) {
2719 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2720 "6107 NVMET prep FCP wqe: link err:"
2721 "NPORT x%x oxid x%x ste %d\n",
2722 ctxp->sid, ctxp->oxid, ctxp->state);
2723 return NULL;
2724 }
2725
2726 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2727 if (!ndlp ||
2728 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2729 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2731 "6108 NVMET prep FCP wqe: no ndlp: "
2732 "NPORT x%x oxid x%x ste %d\n",
2733 ctxp->sid, ctxp->oxid, ctxp->state);
2734 return NULL;
2735 }
2736
2737 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2738 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2739 "6109 NVMET prep FCP wqe: seg cnt err: "
2740 "NPORT x%x oxid x%x ste %d cnt %d\n",
2741 ctxp->sid, ctxp->oxid, ctxp->state,
2742 phba->cfg_nvme_seg_cnt);
2743 return NULL;
2744 }
2745 nsegs = rsp->sg_cnt;
2746
2747 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2748 nvmewqe = ctxp->wqeq;
2749 if (nvmewqe == NULL) {
2750
2751 nvmewqe = ctxp->ctxbuf->iocbq;
2752 if (nvmewqe == NULL) {
2753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2754 "6110 NVMET prep FCP wqe: No "
2755 "WQE: NPORT x%x oxid x%x ste %d\n",
2756 ctxp->sid, ctxp->oxid, ctxp->state);
2757 return NULL;
2758 }
2759 ctxp->wqeq = nvmewqe;
2760 xc = 0;
2761 nvmewqe->sli4_lxritag = NO_XRI;
2762 nvmewqe->sli4_xritag = NO_XRI;
2763 }
2764
2765
2766 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2767 (ctxp->entry_cnt == 1)) ||
2768 (ctxp->state == LPFC_NVME_STE_DATA)) {
2769 wqe = &nvmewqe->wqe;
2770 } else {
2771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2772 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2773 ctxp->state, ctxp->entry_cnt);
2774 return NULL;
2775 }
2776
2777 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2778 switch (rsp->op) {
2779 case NVMET_FCOP_READDATA:
2780 case NVMET_FCOP_READDATA_RSP:
2781
2782 memcpy(&wqe->words[7],
2783 &lpfc_tsend_cmd_template.words[7],
2784 sizeof(uint32_t) * 5);
2785
2786
2787 sgel = &rsp->sg[0];
2788 physaddr = sg_dma_address(sgel);
2789 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2790 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2791 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2792 wqe->fcp_tsend.bde.addrHigh =
2793 cpu_to_le32(putPaddrHigh(physaddr));
2794
2795
2796 wqe->fcp_tsend.payload_offset_len = 0;
2797
2798
2799 wqe->fcp_tsend.relative_offset = ctxp->offset;
2800
2801
2802 wqe->fcp_tsend.reserved = 0;
2803
2804
2805 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2806 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2807 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2808 nvmewqe->sli4_xritag);
2809
2810
2811
2812
2813 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2814
2815
2816 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2817 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2818
2819
2820 if (!xc)
2821 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2822
2823
2824 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2825
2826
2827 sgl->addr_hi = 0;
2828 sgl->addr_lo = 0;
2829 sgl->word2 = 0;
2830 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2831 sgl->word2 = cpu_to_le32(sgl->word2);
2832 sgl->sge_len = 0;
2833 sgl++;
2834 sgl->addr_hi = 0;
2835 sgl->addr_lo = 0;
2836 sgl->word2 = 0;
2837 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2838 sgl->word2 = cpu_to_le32(sgl->word2);
2839 sgl->sge_len = 0;
2840 sgl++;
2841 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2842 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2843
2844
2845
2846 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2847 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2848 bf_set(wqe_sup,
2849 &wqe->fcp_tsend.wqe_com, 1);
2850 } else {
2851 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2852 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2853 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2854 ((rsp->rsplen >> 2) - 1));
2855 memcpy(&wqe->words[16], rsp->rspaddr,
2856 rsp->rsplen);
2857 }
2858 } else {
2859 atomic_inc(&tgtp->xmt_fcp_read);
2860
2861
2862 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2863 }
2864 break;
2865
2866 case NVMET_FCOP_WRITEDATA:
2867
2868 memcpy(&wqe->words[3],
2869 &lpfc_treceive_cmd_template.words[3],
2870 sizeof(uint32_t) * 9);
2871
2872
2873 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2874 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2875 wqe->fcp_treceive.bde.addrLow = 0;
2876 wqe->fcp_treceive.bde.addrHigh = 0;
2877
2878
2879 wqe->fcp_treceive.relative_offset = ctxp->offset;
2880
2881
2882 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2883 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2884 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2885 nvmewqe->sli4_xritag);
2886
2887
2888
2889
2890 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2891
2892
2893 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2894 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2895
2896
2897 if (!xc)
2898 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2899
2900
2901 if (nsegs == 1 && phba->cfg_enable_pbde) {
2902 use_pbde = true;
2903
2904 } else {
2905
2906 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2907 }
2908
2909
2910 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2911
2912
2913 sgl->addr_hi = 0;
2914 sgl->addr_lo = 0;
2915 sgl->word2 = 0;
2916 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2917 sgl->word2 = cpu_to_le32(sgl->word2);
2918 sgl->sge_len = 0;
2919 sgl++;
2920 sgl->addr_hi = 0;
2921 sgl->addr_lo = 0;
2922 sgl->word2 = 0;
2923 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2924 sgl->word2 = cpu_to_le32(sgl->word2);
2925 sgl->sge_len = 0;
2926 sgl++;
2927 atomic_inc(&tgtp->xmt_fcp_write);
2928 break;
2929
2930 case NVMET_FCOP_RSP:
2931
2932 memcpy(&wqe->words[4],
2933 &lpfc_trsp_cmd_template.words[4],
2934 sizeof(uint32_t) * 8);
2935
2936
2937 physaddr = rsp->rspdma;
2938 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2939 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2940 wqe->fcp_trsp.bde.addrLow =
2941 cpu_to_le32(putPaddrLow(physaddr));
2942 wqe->fcp_trsp.bde.addrHigh =
2943 cpu_to_le32(putPaddrHigh(physaddr));
2944
2945
2946 wqe->fcp_trsp.response_len = rsp->rsplen;
2947
2948
2949 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2950 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2951 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2952 nvmewqe->sli4_xritag);
2953
2954
2955
2956
2957 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2958
2959
2960 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2961 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2962
2963
2964 if (xc)
2965 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2966
2967
2968
2969 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2970
2971 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2972 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2973 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2974 ((rsp->rsplen >> 2) - 1));
2975 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2976 }
2977
2978
2979 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2980
2981
2982 nsegs = 0;
2983 sgl->word2 = 0;
2984 atomic_inc(&tgtp->xmt_fcp_rsp);
2985 break;
2986
2987 default:
2988 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2989 "6064 Unknown Rsp Op %d\n",
2990 rsp->op);
2991 return NULL;
2992 }
2993
2994 nvmewqe->retry = 1;
2995 nvmewqe->vport = phba->pport;
2996 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2997 nvmewqe->ndlp = ndlp;
2998
2999 for_each_sg(rsp->sg, sgel, nsegs, i) {
3000 physaddr = sg_dma_address(sgel);
3001 cnt = sg_dma_len(sgel);
3002 sgl->addr_hi = putPaddrHigh(physaddr);
3003 sgl->addr_lo = putPaddrLow(physaddr);
3004 sgl->word2 = 0;
3005 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3006 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3007 if ((i+1) == rsp->sg_cnt)
3008 bf_set(lpfc_sli4_sge_last, sgl, 1);
3009 sgl->word2 = cpu_to_le32(sgl->word2);
3010 sgl->sge_len = cpu_to_le32(cnt);
3011 sgl++;
3012 ctxp->offset += cnt;
3013 }
3014
3015 bde = (struct ulp_bde64 *)&wqe->words[13];
3016 if (use_pbde) {
3017
3018 sgl--;
3019
3020
3021 bde->addrLow = sgl->addr_lo;
3022 bde->addrHigh = sgl->addr_hi;
3023 bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
3024 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3025 bde->tus.w = cpu_to_le32(bde->tus.w);
3026 } else {
3027 memset(bde, 0, sizeof(struct ulp_bde64));
3028 }
3029 ctxp->state = LPFC_NVME_STE_DATA;
3030 ctxp->entry_cnt++;
3031 return nvmewqe;
3032 }
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044 static void
3045 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3046 struct lpfc_iocbq *rspwqe)
3047 {
3048 struct lpfc_async_xchg_ctx *ctxp;
3049 struct lpfc_nvmet_tgtport *tgtp;
3050 uint32_t result;
3051 unsigned long flags;
3052 bool released = false;
3053 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3054
3055 ctxp = cmdwqe->context_un.axchg;
3056 result = wcqe->parameter;
3057
3058 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3059 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3060 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3061
3062 spin_lock_irqsave(&ctxp->ctxlock, flags);
3063 ctxp->state = LPFC_NVME_STE_DONE;
3064
3065
3066
3067
3068 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3069 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3070 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3071 list_del_init(&ctxp->list);
3072 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3073 released = true;
3074 }
3075 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3076 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3077 atomic_inc(&tgtp->xmt_abort_rsp);
3078
3079 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3080 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3081 "WCQE: %08x %08x %08x %08x\n",
3082 ctxp->oxid, ctxp->flag, released,
3083 wcqe->word0, wcqe->total_data_placed,
3084 result, wcqe->word3);
3085
3086 cmdwqe->rsp_dmabuf = NULL;
3087 cmdwqe->bpl_dmabuf = NULL;
3088
3089
3090
3091
3092 if (released)
3093 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3094
3095
3096 lpfc_sli_release_iocbq(phba, cmdwqe);
3097
3098
3099
3100
3101
3102 }
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114 static void
3115 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3116 struct lpfc_iocbq *rspwqe)
3117 {
3118 struct lpfc_async_xchg_ctx *ctxp;
3119 struct lpfc_nvmet_tgtport *tgtp;
3120 unsigned long flags;
3121 uint32_t result;
3122 bool released = false;
3123 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3124
3125 ctxp = cmdwqe->context_un.axchg;
3126 result = wcqe->parameter;
3127
3128 if (!ctxp) {
3129
3130 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3131 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3132 wcqe->word0, wcqe->total_data_placed,
3133 result, wcqe->word3);
3134 return;
3135 }
3136
3137 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3138 spin_lock_irqsave(&ctxp->ctxlock, flags);
3139 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3140 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3141
3142
3143 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3144 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3145 "6112 ABTS Wrong state:%d oxid x%x\n",
3146 ctxp->state, ctxp->oxid);
3147 }
3148
3149
3150
3151
3152 ctxp->state = LPFC_NVME_STE_DONE;
3153 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3154 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3155 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3156 list_del_init(&ctxp->list);
3157 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3158 released = true;
3159 }
3160 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3161 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3162 atomic_inc(&tgtp->xmt_abort_rsp);
3163
3164 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3165 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3166 "WCQE: %08x %08x %08x %08x\n",
3167 ctxp->oxid, ctxp->flag, released,
3168 wcqe->word0, wcqe->total_data_placed,
3169 result, wcqe->word3);
3170
3171 cmdwqe->rsp_dmabuf = NULL;
3172 cmdwqe->bpl_dmabuf = NULL;
3173
3174
3175
3176
3177 if (released)
3178 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3179
3180
3181
3182
3183
3184 }
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196 static void
3197 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3198 struct lpfc_iocbq *rspwqe)
3199 {
3200 struct lpfc_async_xchg_ctx *ctxp;
3201 struct lpfc_nvmet_tgtport *tgtp;
3202 uint32_t result;
3203 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
3204
3205 ctxp = cmdwqe->context_un.axchg;
3206 result = wcqe->parameter;
3207
3208 if (phba->nvmet_support) {
3209 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3210 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3211 }
3212
3213 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3214 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3215 ctxp, wcqe->word0, wcqe->total_data_placed,
3216 result, wcqe->word3);
3217
3218 if (!ctxp) {
3219 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3220 "6415 NVMET LS Abort No ctx: WCQE: "
3221 "%08x %08x %08x %08x\n",
3222 wcqe->word0, wcqe->total_data_placed,
3223 result, wcqe->word3);
3224
3225 lpfc_sli_release_iocbq(phba, cmdwqe);
3226 return;
3227 }
3228
3229 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3231 "6416 NVMET LS abort cmpl state mismatch: "
3232 "oxid x%x: %d %d\n",
3233 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3234 }
3235
3236 cmdwqe->rsp_dmabuf = NULL;
3237 cmdwqe->bpl_dmabuf = NULL;
3238 lpfc_sli_release_iocbq(phba, cmdwqe);
3239 kfree(ctxp);
3240 }
3241
3242 static int
3243 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3244 struct lpfc_async_xchg_ctx *ctxp,
3245 uint32_t sid, uint16_t xri)
3246 {
3247 struct lpfc_nvmet_tgtport *tgtp = NULL;
3248 struct lpfc_iocbq *abts_wqeq;
3249 union lpfc_wqe128 *wqe_abts;
3250 struct lpfc_nodelist *ndlp;
3251
3252 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3253 "6067 ABTS: sid %x xri x%x/x%x\n",
3254 sid, xri, ctxp->wqeq->sli4_xritag);
3255
3256 if (phba->nvmet_support && phba->targetport)
3257 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3258
3259 ndlp = lpfc_findnode_did(phba->pport, sid);
3260 if (!ndlp ||
3261 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3262 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3263 if (tgtp)
3264 atomic_inc(&tgtp->xmt_abort_rsp_error);
3265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3266 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3267 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3268
3269
3270 return 0;
3271 }
3272
3273 abts_wqeq = ctxp->wqeq;
3274 wqe_abts = &abts_wqeq->wqe;
3275
3276
3277
3278
3279
3280 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3281
3282
3283 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3284 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3285 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3286 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3287 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3288
3289
3290 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3291 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3292 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3293 abts_wqeq->sli4_xritag);
3294
3295
3296 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3297 CMD_XMIT_SEQUENCE64_WQE);
3298 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3299 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3300 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3301
3302
3303 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3304
3305
3306 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3307
3308 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3309
3310
3311 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3312 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3313 LPFC_WQE_LENLOC_WORD12);
3314 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3315 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3316
3317
3318 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3319 LPFC_WQE_CQ_ID_DEFAULT);
3320 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3321 OTHER_COMMAND);
3322
3323 abts_wqeq->vport = phba->pport;
3324 abts_wqeq->ndlp = ndlp;
3325 abts_wqeq->context_un.axchg = ctxp;
3326 abts_wqeq->bpl_dmabuf = NULL;
3327 abts_wqeq->num_bdes = 0;
3328
3329 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3330 abts_wqeq->iocb.ulpLe = 1;
3331
3332 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3333 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3334 xri, abts_wqeq->iotag);
3335 return 1;
3336 }
3337
3338 static int
3339 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3340 struct lpfc_async_xchg_ctx *ctxp,
3341 uint32_t sid, uint16_t xri)
3342 {
3343 struct lpfc_nvmet_tgtport *tgtp;
3344 struct lpfc_iocbq *abts_wqeq;
3345 struct lpfc_nodelist *ndlp;
3346 unsigned long flags;
3347 bool ia;
3348 int rc;
3349
3350 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3351 if (!ctxp->wqeq) {
3352 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3353 ctxp->wqeq->hba_wqidx = 0;
3354 }
3355
3356 ndlp = lpfc_findnode_did(phba->pport, sid);
3357 if (!ndlp ||
3358 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3359 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3360 atomic_inc(&tgtp->xmt_abort_rsp_error);
3361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3362 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3363 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3364
3365
3366 spin_lock_irqsave(&ctxp->ctxlock, flags);
3367 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3368 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3369 return 0;
3370 }
3371
3372
3373 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3374 spin_lock_irqsave(&ctxp->ctxlock, flags);
3375 if (!ctxp->abort_wqeq) {
3376 atomic_inc(&tgtp->xmt_abort_rsp_error);
3377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3378 "6161 ABORT failed: No wqeqs: "
3379 "xri: x%x\n", ctxp->oxid);
3380
3381 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3382 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3383 return 0;
3384 }
3385 abts_wqeq = ctxp->abort_wqeq;
3386 ctxp->state = LPFC_NVME_STE_ABORT;
3387 ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
3388 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3389
3390
3391 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3392 "6162 ABORT Request to rport DID x%06x "
3393 "for xri x%x x%x\n",
3394 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3395
3396
3397
3398
3399 spin_lock_irqsave(&phba->hbalock, flags);
3400
3401 if (phba->hba_flag & HBA_IOQ_FLUSH) {
3402 spin_unlock_irqrestore(&phba->hbalock, flags);
3403 atomic_inc(&tgtp->xmt_abort_rsp_error);
3404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3405 "6163 Driver in reset cleanup - flushing "
3406 "NVME Req now. hba_flag x%x oxid x%x\n",
3407 phba->hba_flag, ctxp->oxid);
3408 lpfc_sli_release_iocbq(phba, abts_wqeq);
3409 spin_lock_irqsave(&ctxp->ctxlock, flags);
3410 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3411 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3412 return 0;
3413 }
3414
3415
3416 if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
3417 spin_unlock_irqrestore(&phba->hbalock, flags);
3418 atomic_inc(&tgtp->xmt_abort_rsp_error);
3419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3420 "6164 Outstanding NVME I/O Abort Request "
3421 "still pending on oxid x%x\n",
3422 ctxp->oxid);
3423 lpfc_sli_release_iocbq(phba, abts_wqeq);
3424 spin_lock_irqsave(&ctxp->ctxlock, flags);
3425 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3426 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3427 return 0;
3428 }
3429
3430
3431 abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
3432
3433 lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
3434 abts_wqeq->iotag, CLASS3,
3435 LPFC_WQE_CQ_ID_DEFAULT, ia, true);
3436
3437
3438 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3439 abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3440 abts_wqeq->cmd_flag |= LPFC_IO_NVME;
3441 abts_wqeq->context_un.axchg = ctxp;
3442 abts_wqeq->vport = phba->pport;
3443 if (!ctxp->hdwq)
3444 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3445
3446 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3447 spin_unlock_irqrestore(&phba->hbalock, flags);
3448 if (rc == WQE_SUCCESS) {
3449 atomic_inc(&tgtp->xmt_abort_sol);
3450 return 0;
3451 }
3452
3453 atomic_inc(&tgtp->xmt_abort_rsp_error);
3454 spin_lock_irqsave(&ctxp->ctxlock, flags);
3455 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3456 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3457 lpfc_sli_release_iocbq(phba, abts_wqeq);
3458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3459 "6166 Failed ABORT issue_wqe with status x%x "
3460 "for oxid x%x.\n",
3461 rc, ctxp->oxid);
3462 return 1;
3463 }
3464
3465 static int
3466 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3467 struct lpfc_async_xchg_ctx *ctxp,
3468 uint32_t sid, uint16_t xri)
3469 {
3470 struct lpfc_nvmet_tgtport *tgtp;
3471 struct lpfc_iocbq *abts_wqeq;
3472 unsigned long flags;
3473 bool released = false;
3474 int rc;
3475
3476 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3477 if (!ctxp->wqeq) {
3478 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3479 ctxp->wqeq->hba_wqidx = 0;
3480 }
3481
3482 if (ctxp->state == LPFC_NVME_STE_FREE) {
3483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3484 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3485 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3486 rc = WQE_BUSY;
3487 goto aerr;
3488 }
3489 ctxp->state = LPFC_NVME_STE_ABORT;
3490 ctxp->entry_cnt++;
3491 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3492 if (rc == 0)
3493 goto aerr;
3494
3495 spin_lock_irqsave(&phba->hbalock, flags);
3496 abts_wqeq = ctxp->wqeq;
3497 abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3498 abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
3499 if (!ctxp->hdwq)
3500 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3501
3502 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3503 spin_unlock_irqrestore(&phba->hbalock, flags);
3504 if (rc == WQE_SUCCESS) {
3505 return 0;
3506 }
3507
3508 aerr:
3509 spin_lock_irqsave(&ctxp->ctxlock, flags);
3510 if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3511 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3512 list_del_init(&ctxp->list);
3513 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3514 released = true;
3515 }
3516 ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3517 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3518
3519 atomic_inc(&tgtp->xmt_abort_rsp_error);
3520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3521 "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3522 "(%x)\n",
3523 ctxp->oxid, rc, released);
3524 if (released)
3525 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3526 return 1;
3527 }
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537 int
3538 lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
3539 struct lpfc_async_xchg_ctx *ctxp,
3540 uint32_t sid, uint16_t xri)
3541 {
3542 struct lpfc_nvmet_tgtport *tgtp = NULL;
3543 struct lpfc_iocbq *abts_wqeq;
3544 unsigned long flags;
3545 int rc;
3546
3547 if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3548 (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3549 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3550 ctxp->entry_cnt++;
3551 } else {
3552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553 "6418 NVMET LS abort state mismatch "
3554 "IO x%x: %d %d\n",
3555 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3556 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3557 }
3558
3559 if (phba->nvmet_support && phba->targetport)
3560 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3561
3562 if (!ctxp->wqeq) {
3563
3564 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3565 if (!ctxp->wqeq) {
3566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3567 "6068 Abort failed: No wqeqs: "
3568 "xri: x%x\n", xri);
3569
3570 kfree(ctxp);
3571 return 0;
3572 }
3573 }
3574 abts_wqeq = ctxp->wqeq;
3575
3576 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3577 rc = WQE_BUSY;
3578 goto out;
3579 }
3580
3581 spin_lock_irqsave(&phba->hbalock, flags);
3582 abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3583 abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
3584 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3585 spin_unlock_irqrestore(&phba->hbalock, flags);
3586 if (rc == WQE_SUCCESS) {
3587 if (tgtp)
3588 atomic_inc(&tgtp->xmt_abort_unsol);
3589 return 0;
3590 }
3591 out:
3592 if (tgtp)
3593 atomic_inc(&tgtp->xmt_abort_rsp_error);
3594 abts_wqeq->rsp_dmabuf = NULL;
3595 abts_wqeq->bpl_dmabuf = NULL;
3596 lpfc_sli_release_iocbq(phba, abts_wqeq);
3597 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3598 "6056 Failed to Issue ABTS. Status x%x\n", rc);
3599 return 1;
3600 }
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611 void
3612 lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3613 {
3614 u32 ndlp_has_hh;
3615 struct lpfc_nvmet_tgtport *tgtp;
3616
3617 lpfc_printf_log(phba, KERN_INFO,
3618 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3619 "6203 Invalidating hosthandle x%px\n",
3620 ndlp);
3621
3622 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3623 atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
3624
3625 spin_lock_irq(&ndlp->lock);
3626 ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
3627 spin_unlock_irq(&ndlp->lock);
3628
3629
3630
3631
3632
3633 if (!ndlp_has_hh) {
3634 lpfc_printf_log(phba, KERN_INFO,
3635 LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
3636 "6204 Skip invalidate on node x%px DID x%x\n",
3637 ndlp, ndlp->nlp_DID);
3638 return;
3639 }
3640
3641 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3642
3643 nvmet_fc_invalidate_host(phba->targetport, ndlp);
3644 #endif
3645 }