/*
 * lpfc_scsi.c - FCP/SCSI support for the Emulex LightPulse (lpfc) Fibre
 * Channel host bus adapter driver: SCSI I/O buffer management, DMA mapping
 * of scatter/gather lists, and BlockGuard (T10 protection information)
 * setup for both the SLI-3 and SLI-4 interface specs.
 */

0023 #include <linux/pci.h>
0024 #include <linux/slab.h>
0025 #include <linux/interrupt.h>
0026 #include <linux/export.h>
0027 #include <linux/delay.h>
0028 #include <asm/unaligned.h>
0029 #include <linux/t10-pi.h>
0030 #include <linux/crc-t10dif.h>
0031 #include <linux/blk-cgroup.h>
0032 #include <net/checksum.h>
0033
0034 #include <scsi/scsi.h>
0035 #include <scsi/scsi_device.h>
0036 #include <scsi/scsi_eh.h>
0037 #include <scsi/scsi_host.h>
0038 #include <scsi/scsi_tcq.h>
0039 #include <scsi/scsi_transport_fc.h>
0040
0041 #include "lpfc_version.h"
0042 #include "lpfc_hw4.h"
0043 #include "lpfc_hw.h"
0044 #include "lpfc_sli.h"
0045 #include "lpfc_sli4.h"
0046 #include "lpfc_nl.h"
0047 #include "lpfc_disc.h"
0048 #include "lpfc.h"
0049 #include "lpfc_scsi.h"
0050 #include "lpfc_logmsg.h"
0051 #include "lpfc_crtn.h"
0052 #include "lpfc_vport.h"
0053
0054 #define LPFC_RESET_WAIT 2
0055 #define LPFC_ABORT_WAIT 2
0056
0057 static char *dif_op_str[] = {
0058 "PROT_NORMAL",
0059 "PROT_READ_INSERT",
0060 "PROT_WRITE_STRIP",
0061 "PROT_READ_STRIP",
0062 "PROT_WRITE_INSERT",
0063 "PROT_READ_PASS",
0064 "PROT_WRITE_PASS",
0065 };
0066
0067 struct scsi_dif_tuple {
0068 __be16 guard_tag;
0069 __be16 app_tag;
0070 __be32 ref_tag;
0071 };
0072
0073 static struct lpfc_rport_data *
0074 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
0075 {
0076 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
0077
0078 if (vport->phba->cfg_fof)
0079 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
0080 else
0081 return (struct lpfc_rport_data *)sdev->hostdata;
0082 }
0083
0084 static void
0085 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
0086 static void
0087 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
0088 static int
0089 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
0099 static void
0100 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
0101 struct lpfc_io_buf *lpfc_cmd)
0102 {
0103 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
0104 if (sgl) {
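/* The first SGE holds the FCP command; the response SGE follows it. */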
0105 sgl += 1;
0106 sgl->word2 = le32_to_cpu(sgl->word2);
0107 bf_set(lpfc_sli4_sge_last, sgl, 1);
0108 sgl->word2 = cpu_to_le32(sgl->word2);
0109 }
0110 }
0111
0112 #define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
0122 static void
0123 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
0124 {
0125 struct lpfc_hba *phba = vport->phba;
0126 struct lpfc_rport_data *rdata;
0127 struct lpfc_nodelist *pnode;
0128 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
0129 unsigned long flags;
0130 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
0131 unsigned long latency;
0132 int i;
0133
0134 if (!vport->stat_data_enabled ||
0135 vport->stat_data_blocked ||
0136 (cmd->result))
0137 return;
0138
0139 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
0140 rdata = lpfc_cmd->rdata;
0141 pnode = rdata->pnode;
0142
0143 spin_lock_irqsave(shost->host_lock, flags);
0144 if (!pnode ||
0145 !pnode->lat_data ||
0146 (phba->bucket_type == LPFC_NO_BUCKET)) {
0147 spin_unlock_irqrestore(shost->host_lock, flags);
0148 return;
0149 }
0150
0151 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
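/* Linear buckets: ceil((latency - base) / step), clamped to the range. */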
0152 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
0153 phba->bucket_step;
0154
0155 if (i < 0)
0156 i = 0;
0157 else if (i >= LPFC_MAX_BUCKET_COUNT)
0158 i = LPFC_MAX_BUCKET_COUNT - 1;
0159 } else {
0160 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
0161 if (latency <= (phba->bucket_base +
0162 ((1<<i)*phba->bucket_step)))
0163 break;
0164 }
0165
0166 pnode->lat_data[i].cmd_count++;
0167 spin_unlock_irqrestore(shost->host_lock, flags);
0168 }

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or
 * firmware. This routine posts the WORKER_RAMP_DOWN_QUEUE event for @phba,
 * posting at most one event per QUEUE_RAMP_DOWN_INTERVAL, and wakes up the
 * worker thread of @phba to process it.
 *
 * This routine should be called with no lock held.
 **/
0181 void
0182 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
0183 {
0184 unsigned long flags;
0185 uint32_t evt_posted;
0186 unsigned long expires;
0187
0188 spin_lock_irqsave(&phba->hbalock, flags);
0189 atomic_inc(&phba->num_rsrc_err);
0190 phba->last_rsrc_error_time = jiffies;
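	/* Post at most one ramp-down event per QUEUE_RAMP_DOWN_INTERVAL. */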
0191
0192 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
0193 if (time_after(expires, jiffies)) {
0194 spin_unlock_irqrestore(&phba->hbalock, flags);
0195 return;
0196 }
0197
0198 phba->last_ramp_down_time = jiffies;
0199
0200 spin_unlock_irqrestore(&phba->hbalock, flags);
0201
0202 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
0203 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
0204 if (!evt_posted)
0205 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
0206 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
0207
0208 if (!evt_posted)
0209 lpfc_worker_wake_up(phba);
0210 return;
0211 }

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces each scsi device's queue depth to a
 * lower value.
 **/
0221 void
0222 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
0223 {
0224 struct lpfc_vport **vports;
0225 struct Scsi_Host *shost;
0226 struct scsi_device *sdev;
0227 unsigned long new_queue_depth;
0228 unsigned long num_rsrc_err, num_cmd_success;
0229 int i;
0230
0231 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
0232 num_cmd_success = atomic_read(&phba->num_cmd_success);

/*
 * The error and success command counters are global per
 * driver instance. If another handler has already operated
 * on this error event, just exit.
 */
0239 if (num_rsrc_err == 0)
0240 return;
0241
0242 vports = lpfc_create_vport_work_array(phba);
0243 if (vports != NULL)
0244 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
0245 shost = lpfc_shost_from_vport(vports[i]);
0246 shost_for_each_device(sdev, shost) {
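/* Reduce the depth by the fraction of commands that saw a resource error. */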
0247 new_queue_depth =
0248 sdev->queue_depth * num_rsrc_err /
0249 (num_rsrc_err + num_cmd_success);
0250 if (!new_queue_depth)
0251 new_queue_depth = sdev->queue_depth - 1;
0252 else
0253 new_queue_depth = sdev->queue_depth -
0254 new_queue_depth;
0255 scsi_change_queue_depth(sdev, new_queue_depth);
0256 }
0257 }
0258 lpfc_destroy_vport_work_array(phba, vports);
0259 atomic_set(&phba->num_rsrc_err, 0);
0260 atomic_set(&phba->num_cmd_success, 0);
0261 }

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
0271 void
0272 lpfc_scsi_dev_block(struct lpfc_hba *phba)
0273 {
0274 struct lpfc_vport **vports;
0275 struct Scsi_Host *shost;
0276 struct scsi_device *sdev;
0277 struct fc_rport *rport;
0278 int i;
0279
0280 vports = lpfc_create_vport_work_array(phba);
0281 if (vports != NULL)
0282 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
0283 shost = lpfc_shost_from_vport(vports[i]);
0284 shost_for_each_device(sdev, shost) {
0285 rport = starget_to_rport(scsi_target(sdev));
0286 fc_remote_port_delete(rport);
0287 }
0288 }
0289 lpfc_destroy_vport_work_array(phba, vports);
0290 }

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-3 interface spec;
 * each scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
0308 static int
0309 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
0310 {
0311 struct lpfc_hba *phba = vport->phba;
0312 struct lpfc_io_buf *psb;
0313 struct ulp_bde64 *bpl;
0314 IOCB_t *iocb;
0315 dma_addr_t pdma_phys_fcp_cmd;
0316 dma_addr_t pdma_phys_fcp_rsp;
0317 dma_addr_t pdma_phys_sgl;
0318 uint16_t iotag;
0319 int bcnt, bpl_size;
0320
0321 bpl_size = phba->cfg_sg_dma_buf_size -
0322 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
0323
0324 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
0325 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
0326 num_to_alloc, phba->cfg_sg_dma_buf_size,
0327 (int)sizeof(struct fcp_cmnd),
0328 (int)sizeof(struct fcp_rsp), bpl_size);
0329
0330 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
0331 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
0332 if (!psb)
0333 break;

/*
 * Get memory from the pci pool to map the virt space to
 * pci bus space for an I/O. The DMA buffer includes space
 * for the struct fcp_cmnd, struct fcp_rsp and the number
 * of bde's necessary to support the sg_tablesize.
 */
0341 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
0342 GFP_KERNEL, &psb->dma_handle);
0343 if (!psb->data) {
0344 kfree(psb);
0345 break;
0346 }

/* Allocate iotag for psb->cur_iocbq. */
0350 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
0351 if (iotag == 0) {
0352 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
0353 psb->data, psb->dma_handle);
0354 kfree(psb);
0355 break;
0356 }
0357 psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
0358
0359 psb->fcp_cmnd = psb->data;
0360 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
0361 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
0362 sizeof(struct fcp_rsp);

/* Initialize local short-hand pointers. */
0365 bpl = (struct ulp_bde64 *)psb->dma_sgl;
0366 pdma_phys_fcp_cmd = psb->dma_handle;
0367 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
0368 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
0369 sizeof(struct fcp_rsp);

/*
 * The first two bdes are the FCP_CMD and FCP_RSP.
 * The balance are sg list bdes. Initialize the
 * first two and leave the rest for queuecommand.
 */
0376 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
0377 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
0378 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
0379 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
0380 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

/* Setup the physical region for the FCP RSP */
0383 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
0384 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
0385 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
0386 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
0387 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

/*
 * Since the IOCB for the FCP I/O is built into this
 * lpfc_scsi_buf, initialize it with all known data now.
 */
0393 iocb = &psb->cur_iocbq.iocb;
0394 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
0395 if ((phba->sli_rev == 3) &&
0396 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
/* fill in immediate fcp command BDE */
0398 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
0399 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
0400 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
0401 unsli3.fcp_ext.icd);
0402 iocb->un.fcpi64.bdl.addrHigh = 0;
0403 iocb->ulpBdeCount = 0;
0404 iocb->ulpLe = 0;
/* fill in response BDE */
0406 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
0407 BUFF_TYPE_BDE_64;
0408 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
0409 sizeof(struct fcp_rsp);
0410 iocb->unsli3.fcp_ext.rbde.addrLow =
0411 putPaddrLow(pdma_phys_fcp_rsp);
0412 iocb->unsli3.fcp_ext.rbde.addrHigh =
0413 putPaddrHigh(pdma_phys_fcp_rsp);
0414 } else {
0415 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
0416 iocb->un.fcpi64.bdl.bdeSize =
0417 (2 * sizeof(struct ulp_bde64));
0418 iocb->un.fcpi64.bdl.addrLow =
0419 putPaddrLow(pdma_phys_sgl);
0420 iocb->un.fcpi64.bdl.addrHigh =
0421 putPaddrHigh(pdma_phys_sgl);
0422 iocb->ulpBdeCount = 1;
0423 iocb->ulpLe = 1;
0424 }
0425 iocb->ulpClass = CLASS3;
0426 psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
0428 psb->cur_iocbq.io_buf = psb;
0429 spin_lock_init(&psb->buf_lock);
0430 lpfc_release_scsi_buf_s3(phba, psb);
0431
0432 }
0433
0434 return bcnt;
0435 }

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
0444 void
0445 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
0446 {
0447 struct lpfc_hba *phba = vport->phba;
0448 struct lpfc_io_buf *psb, *next_psb;
0449 struct lpfc_sli4_hdw_queue *qp;
0450 unsigned long iflag = 0;
0451 int idx;
0452
0453 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
0454 return;
0455
0456 spin_lock_irqsave(&phba->hbalock, iflag);
0457 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
0458 qp = &phba->sli4_hba.hdwq[idx];
0459
0460 spin_lock(&qp->abts_io_buf_list_lock);
0461 list_for_each_entry_safe(psb, next_psb,
0462 &qp->lpfc_abts_io_buf_list, list) {
0463 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
0464 continue;
0465
0466 if (psb->rdata && psb->rdata->pnode &&
0467 psb->rdata->pnode->vport == vport)
0468 psb->rdata = NULL;
0469 }
0470 spin_unlock(&qp->abts_io_buf_list_lock);
0471 }
0472 spin_unlock_irqrestore(&phba->hbalock, iflag);
0473 }

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
0484 void
0485 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
0486 struct sli4_wcqe_xri_aborted *axri, int idx)
0487 {
0488 u16 xri = 0;
0489 u16 rxid = 0;
0490 struct lpfc_io_buf *psb, *next_psb;
0491 struct lpfc_sli4_hdw_queue *qp;
0492 unsigned long iflag = 0;
0493 struct lpfc_iocbq *iocbq;
0494 int i;
0495 struct lpfc_nodelist *ndlp;
0496 int rrq_empty = 0;
0497 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
0498 struct scsi_cmnd *cmd;
0499 int offline = 0;
0500
0501 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
0502 return;
0503 offline = pci_channel_offline(phba->pcidev);
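	/* The WCQE fields are only valid while the PCI channel is online. */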
0504 if (!offline) {
0505 xri = bf_get(lpfc_wcqe_xa_xri, axri);
0506 rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
0507 }
0508 qp = &phba->sli4_hba.hdwq[idx];
0509 spin_lock_irqsave(&phba->hbalock, iflag);
0510 spin_lock(&qp->abts_io_buf_list_lock);
0511 list_for_each_entry_safe(psb, next_psb,
0512 &qp->lpfc_abts_io_buf_list, list) {
0513 if (offline)
0514 xri = psb->cur_iocbq.sli4_xritag;
0515 if (psb->cur_iocbq.sli4_xritag == xri) {
0516 list_del_init(&psb->list);
0517 psb->flags &= ~LPFC_SBUF_XBUSY;
0518 psb->status = IOSTAT_SUCCESS;
0519 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
0520 qp->abts_nvme_io_bufs--;
0521 spin_unlock(&qp->abts_io_buf_list_lock);
0522 spin_unlock_irqrestore(&phba->hbalock, iflag);
0523 if (!offline) {
0524 lpfc_sli4_nvme_xri_aborted(phba, axri,
0525 psb);
0526 return;
0527 }
0528 lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
0529 spin_lock_irqsave(&phba->hbalock, iflag);
0530 spin_lock(&qp->abts_io_buf_list_lock);
0531 continue;
0532 }
0533 qp->abts_scsi_io_bufs--;
0534 spin_unlock(&qp->abts_io_buf_list_lock);
0535
0536 if (psb->rdata && psb->rdata->pnode)
0537 ndlp = psb->rdata->pnode;
0538 else
0539 ndlp = NULL;
0540
0541 rrq_empty = list_empty(&phba->active_rrq_list);
0542 spin_unlock_irqrestore(&phba->hbalock, iflag);
0543 if (ndlp && !offline) {
0544 lpfc_set_rrq_active(phba, ndlp,
0545 psb->cur_iocbq.sli4_lxritag, rxid, 1);
0546 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
0547 }
0548
0549 if (phba->cfg_fcp_wait_abts_rsp || offline) {
0550 spin_lock_irqsave(&psb->buf_lock, iflag);
0551 cmd = psb->pCmd;
0552 psb->pCmd = NULL;
0553 spin_unlock_irqrestore(&psb->buf_lock, iflag);

/* The sdev is not guaranteed to be valid post
 * scsi_done upcall.
 */
0558 if (cmd)
0559 scsi_done(cmd);

/*
 * We expect there is an abort thread waiting
 * for command completion; wake up the thread.
 */
0565 spin_lock_irqsave(&psb->buf_lock, iflag);
0566 psb->cur_iocbq.cmd_flag &=
0567 ~LPFC_DRIVER_ABORTED;
0568 if (psb->waitq)
0569 wake_up(psb->waitq);
0570 spin_unlock_irqrestore(&psb->buf_lock, iflag);
0571 }
0572
0573 lpfc_release_scsi_buf_s4(phba, psb);
0574 if (rrq_empty)
0575 lpfc_worker_wake_up(phba);
0576 if (!offline)
0577 return;
0578 spin_lock_irqsave(&phba->hbalock, iflag);
0579 spin_lock(&qp->abts_io_buf_list_lock);
0580 continue;
0581 }
0582 }
0583 spin_unlock(&qp->abts_io_buf_list_lock);
0584 if (!offline) {
0585 for (i = 1; i <= phba->sli.last_iotag; i++) {
0586 iocbq = phba->sli.iocbq_lookup[i];
0587
0588 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
0589 (iocbq->cmd_flag & LPFC_IO_LIBDFC))
0590 continue;
0591 if (iocbq->sli4_xritag != xri)
0592 continue;
0593 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
0594 psb->flags &= ~LPFC_SBUF_XBUSY;
0595 spin_unlock_irqrestore(&phba->hbalock, iflag);
0596 if (!list_empty(&pring->txq))
0597 lpfc_worker_wake_up(phba);
0598 return;
0599 }
0600 }
0601 spin_unlock_irqrestore(&phba->hbalock, iflag);
0602 }

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
0617 static struct lpfc_io_buf *
0618 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
0619 struct scsi_cmnd *cmnd)
0620 {
0621 struct lpfc_io_buf *lpfc_cmd = NULL;
0622 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
0623 unsigned long iflag = 0;
0624
0625 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
0626 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
0627 list);
0628 if (!lpfc_cmd) {
0629 spin_lock(&phba->scsi_buf_list_put_lock);
0630 list_splice(&phba->lpfc_scsi_buf_list_put,
0631 &phba->lpfc_scsi_buf_list_get);
0632 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
0633 list_remove_head(scsi_buf_list_get, lpfc_cmd,
0634 struct lpfc_io_buf, list);
0635 spin_unlock(&phba->scsi_buf_list_put_lock);
0636 }
0637 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
0638
0639 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
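		/* Track this command against the node for queue-depth ramp-up. */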
0640 atomic_inc(&ndlp->cmd_pending);
0641 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
0642 }
0643 return lpfc_cmd;
0644 }

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
0658 static struct lpfc_io_buf *
0659 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
0660 struct scsi_cmnd *cmnd)
0661 {
0662 struct lpfc_io_buf *lpfc_cmd;
0663 struct lpfc_sli4_hdw_queue *qp;
0664 struct sli4_sge *sgl;
0665 dma_addr_t pdma_phys_fcp_rsp;
0666 dma_addr_t pdma_phys_fcp_cmd;
0667 uint32_t cpu, idx;
0668 int tag;
0669 struct fcp_cmd_rsp_buf *tmp = NULL;
0670
0671 cpu = raw_smp_processor_id();
0672 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
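		/* Map the command's blk-mq hardware context to the matching hdwq. */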
0673 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
0674 idx = blk_mq_unique_tag_to_hwq(tag);
0675 } else {
0676 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
0677 }
0678
0679 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
0680 !phba->cfg_xri_rebalancing);
0681 if (!lpfc_cmd) {
0682 qp = &phba->sli4_hba.hdwq[idx];
0683 qp->empty_io_bufs++;
0684 return NULL;
0685 }

/* Setup key fields in buffer that may have been changed
 * if other protocols used this buffer.
 */
0690 lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
0691 lpfc_cmd->prot_seg_cnt = 0;
0692 lpfc_cmd->seg_cnt = 0;
0693 lpfc_cmd->timeout = 0;
0694 lpfc_cmd->flags = 0;
0695 lpfc_cmd->start_time = jiffies;
0696 lpfc_cmd->waitq = NULL;
0697 lpfc_cmd->cpu = cpu;
0698 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
0699 lpfc_cmd->prot_data_type = 0;
0700 #endif
0701 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
0702 if (!tmp) {
0703 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
0704 return NULL;
0705 }
0706
0707 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
0708 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

/*
 * The first two SGEs are the FCP_CMD and FCP_RSP.
 * The balance are sg list bdes. Initialize the
 * first two and leave the rest for queuecommand.
 */
0715 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
0716 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
0717 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
0718 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
0719 sgl->word2 = le32_to_cpu(sgl->word2);
0720 bf_set(lpfc_sli4_sge_last, sgl, 0);
0721 sgl->word2 = cpu_to_le32(sgl->word2);
0722 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
0723 sgl++;

/* Setup the physical region for the FCP RSP */
0726 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
0727 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
0728 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
0729 sgl->word2 = le32_to_cpu(sgl->word2);
0730 bf_set(lpfc_sli4_sge_last, sgl, 1);
0731 sgl->word2 = cpu_to_le32(sgl->word2);
0732 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
0733
0734 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
0735 atomic_inc(&ndlp->cmd_pending);
0736 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
0737 }
0738 return lpfc_cmd;
0739 }

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine wraps the per-interface (SLI-3 vs SLI-4) scsi buffer getter.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
0753 static struct lpfc_io_buf*
0754 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
0755 struct scsi_cmnd *cmnd)
0756 {
0757 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
0758 }

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
0768 static void
0769 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
0770 {
0771 unsigned long iflag = 0;
0772
0773 psb->seg_cnt = 0;
0774 psb->prot_seg_cnt = 0;
0775
0776 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
0777 psb->pCmd = NULL;
0778 psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
0779 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
0780 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
0781 }

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list list. For SLI4, XRIs are tied to the scsi buffer, so
 * the SGL cannot be reused until the XRI abort completes.
 **/
0793 static void
0794 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
0795 {
0796 struct lpfc_sli4_hdw_queue *qp;
0797 unsigned long iflag = 0;
0798
0799 psb->seg_cnt = 0;
0800 psb->prot_seg_cnt = 0;
0801
0802 qp = psb->hdwq;
0803 if (psb->flags & LPFC_SBUF_XBUSY) {
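		/* XRI is still busy on the wire; park the buffer on the
		 * aborted I/O list until the abort completion releases it.
		 */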
0804 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
0805 if (!phba->cfg_fcp_wait_abts_rsp)
0806 psb->pCmd = NULL;
0807 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
0808 qp->abts_scsi_io_bufs++;
0809 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
0810 } else {
0811 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
0812 }
0813 }

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine drops the node command-pending count, if taken, and then
 * releases @psb through the per-interface release routine.
 **/
0823 static void
0824 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
0825 {
0826 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
0827 atomic_dec(&psb->ndlp->cmd_pending);
0828
0829 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
0830 phba->lpfc_release_scsi_buf(phba, psb);
0831 }

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmnd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
0841 static void
0842 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
0843 {
0844 int i, j;
0845
0846 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
0847 i += sizeof(uint32_t), j++) {
0848 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
0849 }
0850 }

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements and formats the bdes, and also initializes
 * all IOCB fields which are dependent on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
0866 static int
0867 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
0868 {
0869 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
0870 struct scatterlist *sgel = NULL;
0871 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
0872 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
0873 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
0874 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
0875 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
0876 dma_addr_t physaddr;
0877 uint32_t num_bde = 0;
0878 int nseg, datadir = scsi_cmnd->sc_data_direction;

/*
 * There are three possibilities here - use scatter-gather segment, use
 * the single mapping, or neither. Start the lpfc command prep by
 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
 * data bde entry.
 */
0886 bpl += 2;
0887 if (scsi_sg_count(scsi_cmnd)) {
/*
 * The driver stores the segment count returned from dma_map_sg
 * because this is a count of dma-mappings used to map the use_sg
 * pages. They are not guaranteed to be the same for those
 * architectures that implement an IOMMU.
 */
0895 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
0896 scsi_sg_count(scsi_cmnd), datadir);
0897 if (unlikely(!nseg))
0898 return 1;
0899
0900 lpfc_cmd->seg_cnt = nseg;
0901 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
0902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0903 "9064 BLKGRD: %s: Too many sg segments"
0904 " from dma_map_sg. Config %d, seg_cnt"
0905 " %d\n", __func__, phba->cfg_sg_seg_cnt,
0906 lpfc_cmd->seg_cnt);
0907 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
0908 lpfc_cmd->seg_cnt = 0;
0909 scsi_dma_unmap(scsi_cmnd);
0910 return 2;
0911 }

/*
 * The driver established a maximum scatter-gather segment count
 * during probe that limits the number of sg elements in any
 * single scsi command. Just run through the seg_cnt and format
 * the bde's.
 *
 * When using SLI-3 without BlockGuard, up to LPFC_EXT_DATA_BDE_COUNT
 * BDEs are embedded directly in the extended IOCB; otherwise the
 * BDEs are added to the BPL.
 */
0922 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
0923 physaddr = sg_dma_address(sgel);
0924 if (phba->sli_rev == 3 &&
0925 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
0926 !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
0927 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
0928 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
0929 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
0930 data_bde->addrLow = putPaddrLow(physaddr);
0931 data_bde->addrHigh = putPaddrHigh(physaddr);
0932 data_bde++;
0933 } else {
0934 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
0935 bpl->tus.f.bdeSize = sg_dma_len(sgel);
0936 bpl->tus.w = le32_to_cpu(bpl->tus.w);
0937 bpl->addrLow =
0938 le32_to_cpu(putPaddrLow(physaddr));
0939 bpl->addrHigh =
0940 le32_to_cpu(putPaddrHigh(physaddr));
0941 bpl++;
0942 }
0943 }
0944 }

/*
 * Finish initializing those IOCB fields that are dependent on the
 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
 * explicitly reinitialized and for SLI-3 the extended bde count is
 * explicitly reinitialized since all iocb memory resources are reused.
 */
0952 if (phba->sli_rev == 3 &&
0953 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
0954 !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
0955 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
 * The extended IOCB format can only fit 3 BDE or a BPL.
 * This I/O has more than 3 BDE so the 1st data bde will
 * be a BPL that is filled in here.
 */
0961 physaddr = lpfc_cmd->dma_handle;
0962 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
0963 data_bde->tus.f.bdeSize = (num_bde *
0964 sizeof(struct ulp_bde64));
0965 physaddr += (sizeof(struct fcp_cmnd) +
0966 sizeof(struct fcp_rsp) +
0967 (2 * sizeof(struct ulp_bde64)));
0968 data_bde->addrHigh = putPaddrHigh(physaddr);
0969 data_bde->addrLow = putPaddrLow(physaddr);
/* ebde count includes the response bde and data bpl */
0971 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
0972 } else {
/* ebde count includes the response bde and data bdes */
0974 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
0975 }
0976 } else {
0977 iocb_cmd->un.fcpi64.bdl.bdeSize =
0978 ((num_bde + 2) * sizeof(struct ulp_bde64));
0979 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
0980 }
0981 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

/*
 * Due to difference in data length between DIF/non-DIF paths,
 * we need to set word 4 of IOCB here
 */
0987 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
0988 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
0989 return 0;
0990 }
0991
0992 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
0995 #define BG_ERR_INIT 0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
0997 #define BG_ERR_TGT 0x2
/* Return BG_ERR_SWAP if swapping CSUM<->CRC is required for error injection */
0999 #define BG_ERR_SWAP 0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
1004 #define BG_ERR_CHECK 0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
1016 static int
1017 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1018 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1019 {
1020 struct scatterlist *sgpe;
1021 struct lpfc_io_buf *lpfc_cmd = NULL;
1022 struct scsi_dif_tuple *src = NULL;
1023 struct lpfc_nodelist *ndlp;
1024 struct lpfc_rport_data *rdata;
1025 uint32_t op = scsi_get_prot_op(sc);
1026 uint32_t blksize;
1027 uint32_t numblks;
1028 u32 lba;
1029 int rc = 0;
1030 int blockoff = 0;
1031
1032 if (op == SCSI_PROT_NORMAL)
1033 return 0;
1034
1035 sgpe = scsi_prot_sglist(sc);
1036 lba = scsi_prot_ref_tag(sc);
1037 if (lba == LPFC_INVALID_REFTAG)
1038 return 0;
1039

/* First check if we need to match the LBA */
1041 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1042 blksize = scsi_prot_interval(sc);
1043 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

/* Make sure we have the right LBA if one is specified */
1046 if (phba->lpfc_injerr_lba < (u64)lba ||
1047 (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
1048 return 0;
1049 if (sgpe) {
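/* Clamp the tuple offset to what the first protection sg entry covers. */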
1050 blockoff = phba->lpfc_injerr_lba - (u64)lba;
1051 numblks = sg_dma_len(sgpe) /
1052 sizeof(struct scsi_dif_tuple);
1053 if (numblks < blockoff)
1054 blockoff = numblks;
1055 }
1056 }

/* Next check if we need to match the remote NPortID or WWPN */
1059 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1060 if (rdata && rdata->pnode) {
1061 ndlp = rdata->pnode;

/* Make sure we have the right NPortID if one is specified */
1064 if (phba->lpfc_injerr_nportid &&
1065 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1066 return 0;

/*
 * Make sure we have the right WWPN if one is specified.
 * wwn[0] should be a non-zero NAA in a good WWPN.
 */
1072 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1073 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1074 sizeof(struct lpfc_name)) != 0))
1075 return 0;
1076 }

/* Setup a ptr to the protection data if the SCSI host provides it */
1079 if (sgpe) {
1080 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1081 src += blockoff;
1082 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1083 }

/* Should we change the Reference Tag */
1086 if (reftag) {
1087 if (phba->lpfc_injerr_wref_cnt) {
1088 switch (op) {
1089 case SCSI_PROT_WRITE_PASS:
1090 if (src) {
/*
 * For WRITE_PASS, force the error
 * to be sent on the wire. It should
 * be detected by the Target.
 * If blockoff != 0 error will be
 * inserted in middle of the IO.
 */
1099 lpfc_printf_log(phba, KERN_ERR,
1100 LOG_TRACE_EVENT,
1101 "9076 BLKGRD: Injecting reftag error: "
1102 "write lba x%lx + x%x oldrefTag x%x\n",
1103 (unsigned long)lba, blockoff,
1104 be32_to_cpu(src->ref_tag));

/*
 * Save the old ref_tag so we can
 * restore it on completion.
 */
1110 if (lpfc_cmd) {
1111 lpfc_cmd->prot_data_type =
1112 LPFC_INJERR_REFTAG;
1113 lpfc_cmd->prot_data_segment =
1114 src;
1115 lpfc_cmd->prot_data =
1116 src->ref_tag;
1117 }
1118 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1119 phba->lpfc_injerr_wref_cnt--;
1120 if (phba->lpfc_injerr_wref_cnt == 0) {
1121 phba->lpfc_injerr_nportid = 0;
1122 phba->lpfc_injerr_lba =
1123 LPFC_INJERR_LBA_OFF;
1124 memset(&phba->lpfc_injerr_wwpn,
1125 0, sizeof(struct lpfc_name));
1126 }
1127 rc = BG_ERR_TGT | BG_ERR_CHECK;
1128
1129 break;
1130 }
1131 fallthrough;
1132 case SCSI_PROT_WRITE_INSERT:
/*
 * For WRITE_INSERT, force the error
 * to be sent on the wire. It should be
 * detected by the Target.
 */
/* DEADBEEF will be the reftag on the wire */
1139 *reftag = 0xDEADBEEF;
1140 phba->lpfc_injerr_wref_cnt--;
1141 if (phba->lpfc_injerr_wref_cnt == 0) {
1142 phba->lpfc_injerr_nportid = 0;
1143 phba->lpfc_injerr_lba =
1144 LPFC_INJERR_LBA_OFF;
1145 memset(&phba->lpfc_injerr_wwpn,
1146 0, sizeof(struct lpfc_name));
1147 }
1148 rc = BG_ERR_TGT | BG_ERR_CHECK;
1149
1150 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1151 "9078 BLKGRD: Injecting reftag error: "
1152 "write lba x%lx\n", (unsigned long)lba);
1153 break;
1154 case SCSI_PROT_WRITE_STRIP:
/*
 * For WRITE_STRIP and WRITE_PASS,
 * force the error on data
 * being copied from SLI-Host to SLI-Port.
 */
1160 *reftag = 0xDEADBEEF;
1161 phba->lpfc_injerr_wref_cnt--;
1162 if (phba->lpfc_injerr_wref_cnt == 0) {
1163 phba->lpfc_injerr_nportid = 0;
1164 phba->lpfc_injerr_lba =
1165 LPFC_INJERR_LBA_OFF;
1166 memset(&phba->lpfc_injerr_wwpn,
1167 0, sizeof(struct lpfc_name));
1168 }
1169 rc = BG_ERR_INIT;
1170
1171 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1172 "9077 BLKGRD: Injecting reftag error: "
1173 "write lba x%lx\n", (unsigned long)lba);
1174 break;
1175 }
1176 }
1177 if (phba->lpfc_injerr_rref_cnt) {
1178 switch (op) {
1179 case SCSI_PROT_READ_INSERT:
1180 case SCSI_PROT_READ_STRIP:
1181 case SCSI_PROT_READ_PASS:
/*
 * For READ_STRIP and READ_PASS, force the
 * error on data being read off the wire. It
 * should force an IO error to the driver.
 */
1187 *reftag = 0xDEADBEEF;
1188 phba->lpfc_injerr_rref_cnt--;
1189 if (phba->lpfc_injerr_rref_cnt == 0) {
1190 phba->lpfc_injerr_nportid = 0;
1191 phba->lpfc_injerr_lba =
1192 LPFC_INJERR_LBA_OFF;
1193 memset(&phba->lpfc_injerr_wwpn,
1194 0, sizeof(struct lpfc_name));
1195 }
1196 rc = BG_ERR_INIT;
1197
1198 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1199 "9079 BLKGRD: Injecting reftag error: "
1200 "read lba x%lx\n", (unsigned long)lba);
1201 break;
1202 }
1203 }
1204 }

/* Should we change the Application Tag */
1207 if (apptag) {
1208 if (phba->lpfc_injerr_wapp_cnt) {
1209 switch (op) {
1210 case SCSI_PROT_WRITE_PASS:
1211 if (src) {
/*
 * For WRITE_PASS, force the error
 * to be sent on the wire. It should
 * be detected by the Target.
 * If blockoff != 0 error will be
 * inserted in middle of the IO.
 */
1220 lpfc_printf_log(phba, KERN_ERR,
1221 LOG_TRACE_EVENT,
1222 "9080 BLKGRD: Injecting apptag error: "
1223 "write lba x%lx + x%x oldappTag x%x\n",
1224 (unsigned long)lba, blockoff,
1225 be16_to_cpu(src->app_tag));

/*
 * Save the old app_tag so we can
 * restore it on completion.
 */
1231 if (lpfc_cmd) {
1232 lpfc_cmd->prot_data_type =
1233 LPFC_INJERR_APPTAG;
1234 lpfc_cmd->prot_data_segment =
1235 src;
1236 lpfc_cmd->prot_data =
1237 src->app_tag;
1238 }
1239 src->app_tag = cpu_to_be16(0xDEAD);
1240 phba->lpfc_injerr_wapp_cnt--;
1241 if (phba->lpfc_injerr_wapp_cnt == 0) {
1242 phba->lpfc_injerr_nportid = 0;
1243 phba->lpfc_injerr_lba =
1244 LPFC_INJERR_LBA_OFF;
1245 memset(&phba->lpfc_injerr_wwpn,
1246 0, sizeof(struct lpfc_name));
1247 }
1248 rc = BG_ERR_TGT | BG_ERR_CHECK;
1249 break;
1250 }
1251 fallthrough;
1252 case SCSI_PROT_WRITE_INSERT:
/*
 * For WRITE_INSERT, force the error
 * to be sent on the wire. It should be
 * detected by the Target.
 */
/* DEAD will be the apptag on the wire */
1259 *apptag = 0xDEAD;
1260 phba->lpfc_injerr_wapp_cnt--;
1261 if (phba->lpfc_injerr_wapp_cnt == 0) {
1262 phba->lpfc_injerr_nportid = 0;
1263 phba->lpfc_injerr_lba =
1264 LPFC_INJERR_LBA_OFF;
1265 memset(&phba->lpfc_injerr_wwpn,
1266 0, sizeof(struct lpfc_name));
1267 }
1268 rc = BG_ERR_TGT | BG_ERR_CHECK;
1269
1270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1271 "0813 BLKGRD: Injecting apptag error: "
1272 "write lba x%lx\n", (unsigned long)lba);
1273 break;
1274 case SCSI_PROT_WRITE_STRIP:
/*
 * For WRITE_STRIP and WRITE_PASS,
 * force the error on data
 * being copied from SLI-Host to SLI-Port.
 */
1280 *apptag = 0xDEAD;
1281 phba->lpfc_injerr_wapp_cnt--;
1282 if (phba->lpfc_injerr_wapp_cnt == 0) {
1283 phba->lpfc_injerr_nportid = 0;
1284 phba->lpfc_injerr_lba =
1285 LPFC_INJERR_LBA_OFF;
1286 memset(&phba->lpfc_injerr_wwpn,
1287 0, sizeof(struct lpfc_name));
1288 }
1289 rc = BG_ERR_INIT;
1290
1291 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1292 "0812 BLKGRD: Injecting apptag error: "
1293 "write lba x%lx\n", (unsigned long)lba);
1294 break;
1295 }
1296 }
1297 if (phba->lpfc_injerr_rapp_cnt) {
1298 switch (op) {
1299 case SCSI_PROT_READ_INSERT:
1300 case SCSI_PROT_READ_STRIP:
1301 case SCSI_PROT_READ_PASS:
/*
 * For READ_STRIP and READ_PASS, force the
 * error on data being read off the wire. It
 * should force an IO error to the driver.
 */
1307 *apptag = 0xDEAD;
1308 phba->lpfc_injerr_rapp_cnt--;
1309 if (phba->lpfc_injerr_rapp_cnt == 0) {
1310 phba->lpfc_injerr_nportid = 0;
1311 phba->lpfc_injerr_lba =
1312 LPFC_INJERR_LBA_OFF;
1313 memset(&phba->lpfc_injerr_wwpn,
1314 0, sizeof(struct lpfc_name));
1315 }
1316 rc = BG_ERR_INIT;
1317
1318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1319 "0814 BLKGRD: Injecting apptag error: "
1320 "read lba x%lx\n", (unsigned long)lba);
1321 break;
1322 }
1323 }
1324 }

/* Should we change the Guard Tag */
1328 if (new_guard) {
1329 if (phba->lpfc_injerr_wgrd_cnt) {
1330 switch (op) {
1331 case SCSI_PROT_WRITE_PASS:
1332 rc = BG_ERR_CHECK;
1333 fallthrough;
1334
1335 case SCSI_PROT_WRITE_INSERT:
/*
 * For WRITE_INSERT, force the
 * error to be sent on the wire. It should be
 * detected by the Target.
 */
1341 phba->lpfc_injerr_wgrd_cnt--;
1342 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1343 phba->lpfc_injerr_nportid = 0;
1344 phba->lpfc_injerr_lba =
1345 LPFC_INJERR_LBA_OFF;
1346 memset(&phba->lpfc_injerr_wwpn,
1347 0, sizeof(struct lpfc_name));
1348 }
1349
1350 rc |= BG_ERR_TGT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */

1353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1354 "0817 BLKGRD: Injecting guard error: "
1355 "write lba x%lx\n", (unsigned long)lba);
1356 break;
1357 case SCSI_PROT_WRITE_STRIP:
/*
 * For WRITE_STRIP and WRITE_PASS,
 * force the error on data
 * being copied from SLI-Host to SLI-Port.
 */
1363 phba->lpfc_injerr_wgrd_cnt--;
1364 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1365 phba->lpfc_injerr_nportid = 0;
1366 phba->lpfc_injerr_lba =
1367 LPFC_INJERR_LBA_OFF;
1368 memset(&phba->lpfc_injerr_wwpn,
1369 0, sizeof(struct lpfc_name));
1370 }
1371
1372 rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */

1375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1376 "0816 BLKGRD: Injecting guard error: "
1377 "write lba x%lx\n", (unsigned long)lba);
1378 break;
1379 }
1380 }
1381 if (phba->lpfc_injerr_rgrd_cnt) {
1382 switch (op) {
1383 case SCSI_PROT_READ_INSERT:
1384 case SCSI_PROT_READ_STRIP:
1385 case SCSI_PROT_READ_PASS:
/*
 * For READ_STRIP and READ_PASS, force the
 * error on data being read off the wire. It
 * should force an IO error to the driver.
 */
1391 phba->lpfc_injerr_rgrd_cnt--;
1392 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1393 phba->lpfc_injerr_nportid = 0;
1394 phba->lpfc_injerr_lba =
1395 LPFC_INJERR_LBA_OFF;
1396 memset(&phba->lpfc_injerr_wwpn,
1397 0, sizeof(struct lpfc_name));
1398 }
1399
1400 rc = BG_ERR_INIT | BG_ERR_SWAP;
/* Signals the caller to swap CRC->CSUM */

1403 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1404 "0818 BLKGRD: Injecting guard error: "
1405 "read lba x%lx\n", (unsigned long)lba);
1406 }
1407 }
1408 }
1409
1410 return rc;
1411 }
1412 #endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 *			the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
1425 static int
1426 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1427 uint8_t *txop, uint8_t *rxop)
1428 {
1429 uint8_t ret = 0;
1430
1431 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1432 switch (scsi_get_prot_op(sc)) {
1433 case SCSI_PROT_READ_INSERT:
1434 case SCSI_PROT_WRITE_STRIP:
1435 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1436 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1437 break;
1438
1439 case SCSI_PROT_READ_STRIP:
1440 case SCSI_PROT_WRITE_INSERT:
1441 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1442 *txop = BG_OP_IN_NODIF_OUT_CRC;
1443 break;
1444
1445 case SCSI_PROT_READ_PASS:
1446 case SCSI_PROT_WRITE_PASS:
1447 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1448 *txop = BG_OP_IN_CSUM_OUT_CRC;
1449 break;
1450
1451 case SCSI_PROT_NORMAL:
1452 default:
1453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1454 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1455 scsi_get_prot_op(sc));
1456 ret = 1;
1457 break;
1458
1459 }
1460 } else {
1461 switch (scsi_get_prot_op(sc)) {
1462 case SCSI_PROT_READ_STRIP:
1463 case SCSI_PROT_WRITE_INSERT:
1464 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1465 *txop = BG_OP_IN_NODIF_OUT_CRC;
1466 break;
1467
1468 case SCSI_PROT_READ_PASS:
1469 case SCSI_PROT_WRITE_PASS:
1470 *rxop = BG_OP_IN_CRC_OUT_CRC;
1471 *txop = BG_OP_IN_CRC_OUT_CRC;
1472 break;
1473
1474 case SCSI_PROT_READ_INSERT:
1475 case SCSI_PROT_WRITE_STRIP:
1476 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1477 *txop = BG_OP_IN_CRC_OUT_NODIF;
1478 break;
1479
1480 case SCSI_PROT_NORMAL:
1481 default:
1482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1483 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1484 scsi_get_prot_op(sc));
1485 ret = 1;
1486 break;
1487 }
1488 }
1489
1490 return ret;
1491 }
1492
1493 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/**
 * lpfc_bg_err_opcodes - Determine the BlockGuard opcodes to be used with
 *			the specified SCSI command in order to force a
 *			guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/
1505 static int
1506 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1507 uint8_t *txop, uint8_t *rxop)
1508 {
1509
1510 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1511 switch (scsi_get_prot_op(sc)) {
1512 case SCSI_PROT_READ_INSERT:
1513 case SCSI_PROT_WRITE_STRIP:
1514 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1515 *txop = BG_OP_IN_CRC_OUT_NODIF;
1516 break;
1517
1518 case SCSI_PROT_READ_STRIP:
1519 case SCSI_PROT_WRITE_INSERT:
1520 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1521 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1522 break;
1523
1524 case SCSI_PROT_READ_PASS:
1525 case SCSI_PROT_WRITE_PASS:
1526 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1527 *txop = BG_OP_IN_CRC_OUT_CSUM;
1528 break;
1529
1530 case SCSI_PROT_NORMAL:
1531 default:
1532 break;
1533
1534 }
1535 } else {
1536 switch (scsi_get_prot_op(sc)) {
1537 case SCSI_PROT_READ_STRIP:
1538 case SCSI_PROT_WRITE_INSERT:
1539 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1540 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1541 break;
1542
1543 case SCSI_PROT_READ_PASS:
1544 case SCSI_PROT_WRITE_PASS:
1545 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1546 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1547 break;
1548
1549 case SCSI_PROT_READ_INSERT:
1550 case SCSI_PROT_WRITE_STRIP:
1551 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1552 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1553 break;
1554
1555 case SCSI_PROT_NORMAL:
1556 default:
1557 break;
1558 }
1559 }
1560
1561 return 0;
1562 }
1563 #endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
1596 static int
1597 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1598 struct ulp_bde64 *bpl, int datasegcnt)
1599 {
1600 struct scatterlist *sgde = NULL;
1601 struct lpfc_pde5 *pde5 = NULL;
1602 struct lpfc_pde6 *pde6 = NULL;
1603 dma_addr_t physaddr;
1604 int i = 0, num_bde = 0, status;
1605 int datadir = sc->sc_data_direction;
1606 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1607 uint32_t rc;
1608 #endif
1609 uint32_t checking = 1;
1610 uint32_t reftag;
1611 uint8_t txop, rxop;
1612
1613 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1614 if (status)
1615 goto out;

/* extract some info from the scsi command for pde */
1618 reftag = scsi_prot_ref_tag(sc);
1619 if (reftag == LPFC_INVALID_REFTAG)
1620 goto out;
1621
1622 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1623 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1624 if (rc) {
1625 if (rc & BG_ERR_SWAP)
1626 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1627 if (rc & BG_ERR_CHECK)
1628 checking = 0;
1629 }
1630 #endif

/* setup PDE5 with what we have */
1633 pde5 = (struct lpfc_pde5 *) bpl;
1634 memset(pde5, 0, sizeof(struct lpfc_pde5));
1635 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

/* Endianness conversion if necessary for PDE5 */
1638 pde5->word0 = cpu_to_le32(pde5->word0);
1639 pde5->reftag = cpu_to_le32(reftag);

/* advance bpl and increment bde count */
1642 num_bde++;
1643 bpl++;
1644 pde6 = (struct lpfc_pde6 *) bpl;

/* setup PDE6 with the rest of the info */
1647 memset(pde6, 0, sizeof(struct lpfc_pde6));
1648 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1649 bf_set(pde6_optx, pde6, txop);
1650 bf_set(pde6_oprx, pde6, rxop);

/*
 * We only need to check the data on READs, for WRITEs
 * protection data is automatically generated, not checked.
 */
1656 if (datadir == DMA_FROM_DEVICE) {
1657 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1658 bf_set(pde6_ce, pde6, checking);
1659 else
1660 bf_set(pde6_ce, pde6, 0);
1661
1662 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1663 bf_set(pde6_re, pde6, checking);
1664 else
1665 bf_set(pde6_re, pde6, 0);
1666 }
1667 bf_set(pde6_ai, pde6, 1);
1668 bf_set(pde6_ae, pde6, 0);
1669 bf_set(pde6_apptagval, pde6, 0);

/* Endianness conversion if necessary for PDE6 */
1672 pde6->word0 = cpu_to_le32(pde6->word0);
1673 pde6->word1 = cpu_to_le32(pde6->word1);
1674 pde6->word2 = cpu_to_le32(pde6->word2);

/* advance bpl and increment bde count */
1677 num_bde++;
1678 bpl++;

/* assumption: caller has already run dma_map_sg on command data */
1681 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1682 physaddr = sg_dma_address(sgde);
1683 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1684 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1685 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1686 if (datadir == DMA_TO_DEVICE)
1687 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1688 else
1689 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1690 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1691 bpl++;
1692 num_bde++;
1693 }
1694
1695 out:
1696 return num_bde;
1697 }

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
1738 static int
1739 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1740 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1741 {
1742 struct scatterlist *sgde = NULL;
1743 struct scatterlist *sgpe = NULL;
1744 struct lpfc_pde5 *pde5 = NULL;
1745 struct lpfc_pde6 *pde6 = NULL;
1746 struct lpfc_pde7 *pde7 = NULL;
1747 dma_addr_t dataphysaddr, protphysaddr;
1748 unsigned short curr_data = 0, curr_prot = 0;
1749 unsigned int split_offset;
1750 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1751 unsigned int protgrp_blks, protgrp_bytes;
1752 unsigned int remainder, subtotal;
1753 int status;
1754 int datadir = sc->sc_data_direction;
1755 unsigned char pgdone = 0, alldone = 0;
1756 unsigned blksize;
1757 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1758 uint32_t rc;
1759 #endif
1760 uint32_t checking = 1;
1761 uint32_t reftag;
1762 uint8_t txop, rxop;
1763 int num_bde = 0;
1764
1765 sgpe = scsi_prot_sglist(sc);
1766 sgde = scsi_sglist(sc);
1767
1768 if (!sgpe || !sgde) {
1769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1770 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1771 sgpe, sgde);
1772 return 0;
1773 }
1774
1775 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1776 if (status)
1777 goto out;

/* extract some info from the scsi command */
1780 blksize = scsi_prot_interval(sc);
1781 reftag = scsi_prot_ref_tag(sc);
1782 if (reftag == LPFC_INVALID_REFTAG)
1783 goto out;
1784
1785 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1786 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1787 if (rc) {
1788 if (rc & BG_ERR_SWAP)
1789 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1790 if (rc & BG_ERR_CHECK)
1791 checking = 0;
1792 }
1793 #endif
1794
1795 split_offset = 0;
1796 do {
/* Check to see if we ran out of space */
1798 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1799 return num_bde + 3;

/* setup PDE5 with what we have */
1802 pde5 = (struct lpfc_pde5 *) bpl;
1803 memset(pde5, 0, sizeof(struct lpfc_pde5));
1804 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

/* Endianness conversion if necessary for PDE5 */
1807 pde5->word0 = cpu_to_le32(pde5->word0);
1808 pde5->reftag = cpu_to_le32(reftag);

/* advance bpl and increment bde count */
1811 num_bde++;
1812 bpl++;
1813 pde6 = (struct lpfc_pde6 *) bpl;

/* setup PDE6 with the rest of the info */
1816 memset(pde6, 0, sizeof(struct lpfc_pde6));
1817 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1818 bf_set(pde6_optx, pde6, txop);
1819 bf_set(pde6_oprx, pde6, rxop);
1820
1821 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1822 bf_set(pde6_ce, pde6, checking);
1823 else
1824 bf_set(pde6_ce, pde6, 0);
1825
1826 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1827 bf_set(pde6_re, pde6, checking);
1828 else
1829 bf_set(pde6_re, pde6, 0);
1830
1831 bf_set(pde6_ai, pde6, 1);
1832 bf_set(pde6_ae, pde6, 0);
1833 bf_set(pde6_apptagval, pde6, 0);

/* Endianness conversion if necessary for PDE6 */
1836 pde6->word0 = cpu_to_le32(pde6->word0);
1837 pde6->word1 = cpu_to_le32(pde6->word1);
1838 pde6->word2 = cpu_to_le32(pde6->word2);

/* advance bpl and increment bde count */
1841 num_bde++;
1842 bpl++;

/* setup the first BDE that points to protection buffer */
1845 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1846 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

/* must be integer multiple of the DIF size */
1849 BUG_ON(protgroup_len % 8);
1850
1851 pde7 = (struct lpfc_pde7 *) bpl;
1852 memset(pde7, 0, sizeof(struct lpfc_pde7));
1853 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1854
1855 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1856 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1857
1858 protgrp_blks = protgroup_len / 8;
1859 protgrp_bytes = protgrp_blks * blksize;

/* check if this pde is crossing the 4K boundary; if so split */
1862 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
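/* Only the portion up to the 4K boundary goes into this PDE7;
 * the remainder starts a new protection group on the next pass.
 */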
1863 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1864 protgroup_offset += protgroup_remainder;
1865 protgrp_blks = protgroup_remainder / 8;
1866 protgrp_bytes = protgrp_blks * blksize;
1867 } else {
1868 protgroup_offset = 0;
1869 curr_prot++;
1870 }
1871
1872 num_bde++;

/* setup BDE's for data blocks associated with DIF data */
1875 pgdone = 0;
1876 subtotal = 0;
1877 while (!pgdone) {
/* Check to see if we ran out of space */
1879 if (num_bde >= phba->cfg_total_seg_cnt)
1880 return num_bde + 1;
1881
1882 if (!sgde) {
1883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1884 "9065 BLKGRD:%s Invalid data segment\n",
1885 __func__);
1886 return 0;
1887 }
1888 bpl++;
1889 dataphysaddr = sg_dma_address(sgde) + split_offset;
1890 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1891 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1892
1893 remainder = sg_dma_len(sgde) - split_offset;
1894
1895 if ((subtotal + remainder) <= protgrp_bytes) {
/* we can use this whole buffer */
1897 bpl->tus.f.bdeSize = remainder;
1898 split_offset = 0;
1899
1900 if ((subtotal + remainder) == protgrp_bytes)
1901 pgdone = 1;
1902 } else {
/* must split this buffer with next prot grp */
1904 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1905 split_offset += bpl->tus.f.bdeSize;
1906 }
1907
1908 subtotal += bpl->tus.f.bdeSize;
1909
1910 if (datadir == DMA_TO_DEVICE)
1911 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1912 else
1913 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1914 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1915
1916 num_bde++;
1917 curr_data++;
1918
1919 if (split_offset)
1920 break;

/* Move to the next s/g segment if possible */
1923 sgde = sg_next(sgde);
1924
1925 }
1926
1927 if (protgroup_offset) {
/* update the reference tag */
1929 reftag += protgrp_blks;
1930 bpl++;
1931 continue;
1932 }

/* are we done ? */
1935 if (curr_prot == protcnt) {
1936 alldone = 1;
1937 } else if (curr_prot < protcnt) {
/* advance to next prot buffer */
1939 sgpe = sg_next(sgpe);
1940 bpl++;

/* update the reference tag */
1943 reftag += protgrp_blks;
1944 } else {
/* if we're here, we have a bug */
1946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1947 "9054 BLKGRD: bug in %s\n", __func__);
1948 }
1949
1950 } while (!alldone);
1951 out:
1952
1953 return num_bde;
1954 }

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up the SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into the data stream (or strip DIF from
 * the incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DISEED          |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
1986 static int
1987 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1988 struct sli4_sge *sgl, int datasegcnt,
1989 struct lpfc_io_buf *lpfc_cmd)
1990 {
1991 struct scatterlist *sgde = NULL;
1992 struct sli4_sge_diseed *diseed = NULL;
1993 dma_addr_t physaddr;
1994 int i = 0, num_sge = 0, status;
1995 uint32_t reftag;
1996 uint8_t txop, rxop;
1997 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1998 uint32_t rc;
1999 #endif
2000 uint32_t checking = 1;
2001 uint32_t dma_len;
2002 uint32_t dma_offset = 0;
2003 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2004 int j;
2005 bool lsp_just_set = false;
2006
2007 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2008 if (status)
2009 goto out;

/* extract some info from the scsi command for pde */
2012 reftag = scsi_prot_ref_tag(sc);
2013 if (reftag == LPFC_INVALID_REFTAG)
2014 goto out;
2015
2016 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2017 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2018 if (rc) {
2019 if (rc & BG_ERR_SWAP)
2020 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2021 if (rc & BG_ERR_CHECK)
2022 checking = 0;
2023 }
2024 #endif

/* setup DISEED with what we have */
2027 diseed = (struct sli4_sge_diseed *) sgl;
2028 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2029 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

/* Endianness conversion if necessary */
2032 diseed->ref_tag = cpu_to_le32(reftag);
2033 diseed->ref_tag_tran = diseed->ref_tag;

/*
 * We only need to check the data on READs, for WRITEs
 * protection data is automatically generated, not checked.
 */
2039 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2040 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
2041 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2042 else
2043 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2044
2045 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2046 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2047 else
2048 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2049 }

/* setup DISEED with the rest of the info */
2052 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2053 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2054
2055 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2056 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

/* Endianness conversion if necessary for DISEED */
2059 diseed->word2 = cpu_to_le32(diseed->word2);
2060 diseed->word3 = cpu_to_le32(diseed->word3);

/* advance sgl and increment sge count */
2063 num_sge++;
2064 sgl++;

/* assumption: caller has already run dma_map_sg on command data */
2067 sgde = scsi_sglist(sc);
2068 j = 3;
2069 for (i = 0; i < datasegcnt; i++) {
/* clear it */
2071 sgl->word2 = 0;

/* do we need to expand the segment */
2074 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2075 ((datasegcnt - 1) != i)) {

/* set LSP type */
2077 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2078
2079 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2080
2081 if (unlikely(!sgl_xtra)) {
2082 lpfc_cmd->seg_cnt = 0;
2083 return 0;
2084 }
2085 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2086 sgl_xtra->dma_phys_sgl));
2087 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2088 sgl_xtra->dma_phys_sgl));
2089
2090 } else {
2091 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2092 }
2093
2094 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
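/* A non-LSP SGE describes a data segment; an LSP SGE chains to
 * the extra SGL page obtained above.
 */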
2095 if ((datasegcnt - 1) == i)
2096 bf_set(lpfc_sli4_sge_last, sgl, 1);
2097 physaddr = sg_dma_address(sgde);
2098 dma_len = sg_dma_len(sgde);
2099 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2100 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2101
2102 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2103 sgl->word2 = cpu_to_le32(sgl->word2);
2104 sgl->sge_len = cpu_to_le32(dma_len);
2105
2106 dma_offset += dma_len;
2107 sgde = sg_next(sgde);
2108
2109 sgl++;
2110 num_sge++;
2111 lsp_just_set = false;
2112
2113 } else {
2114 sgl->word2 = cpu_to_le32(sgl->word2);
2115 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2116
2117 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2118 i = i - 1;
2119
2120 lsp_just_set = true;
2121 }
2122
2123 j++;
2124
2125 }
2126
2127 out:
2128 return num_sge;
2129 }

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up the SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
2169 static int
2170 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2171 struct sli4_sge *sgl, int datacnt, int protcnt,
2172 struct lpfc_io_buf *lpfc_cmd)
2173 {
2174 struct scatterlist *sgde = NULL;
2175 struct scatterlist *sgpe = NULL;
2176 struct sli4_sge_diseed *diseed = NULL;
2177 dma_addr_t dataphysaddr, protphysaddr;
2178 unsigned short curr_data = 0, curr_prot = 0;
2179 unsigned int split_offset;
2180 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2181 unsigned int protgrp_blks, protgrp_bytes;
2182 unsigned int remainder, subtotal;
2183 int status;
2184 unsigned char pgdone = 0, alldone = 0;
2185 unsigned blksize;
2186 uint32_t reftag;
2187 uint8_t txop, rxop;
2188 uint32_t dma_len;
2189 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2190 uint32_t rc;
2191 #endif
2192 uint32_t checking = 1;
2193 uint32_t dma_offset = 0;
2194 int num_sge = 0, j = 2;
2195 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2196
2197 sgpe = scsi_prot_sglist(sc);
2198 sgde = scsi_sglist(sc);
2199
2200 if (!sgpe || !sgde) {
2201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2203 sgpe, sgde);
2204 return 0;
2205 }
2206
2207 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2208 if (status)
2209 goto out;

/* extract some info from the scsi command */
2212 blksize = scsi_prot_interval(sc);
2213 reftag = scsi_prot_ref_tag(sc);
2214 if (reftag == LPFC_INVALID_REFTAG)
2215 goto out;
2216
2217 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2218 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2219 if (rc) {
2220 if (rc & BG_ERR_SWAP)
2221 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2222 if (rc & BG_ERR_CHECK)
2223 checking = 0;
2224 }
2225 #endif
2226
2227 split_offset = 0;
2228 do {
/* Check to see if we ran out of space */
2230 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2231 !(phba->cfg_xpsgl))
2232 return num_sge + 3;

/* DISEED and DIF have to be together */
2235 if (!((j + 1) % phba->border_sge_num) ||
2236 !((j + 2) % phba->border_sge_num) ||
2237 !((j + 3) % phba->border_sge_num)) {
2238 sgl->word2 = 0;

/* set LSP type */
2241 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2242
2243 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2244
2245 if (unlikely(!sgl_xtra)) {
2246 goto out;
2247 } else {
2248 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2249 sgl_xtra->dma_phys_sgl));
2250 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2251 sgl_xtra->dma_phys_sgl));
2252 }
2253
2254 sgl->word2 = cpu_to_le32(sgl->word2);
2255 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2256
2257 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2258 j = 0;
2259 }

/* setup DISEED with what we have */
2262 diseed = (struct sli4_sge_diseed *) sgl;
2263 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2264 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

/* Endianness conversion if necessary */
2267 diseed->ref_tag = cpu_to_le32(reftag);
2268 diseed->ref_tag_tran = diseed->ref_tag;
2269
2270 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2271 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2272 } else {
2273 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
/*
 * When in this mode, the hardware will replace
 * the guard tag from the host with a
 * newly generated good CRC for the wire.
 * Switch to raw mode here to avoid this
 * behavior. What the host sends gets put on the wire.
 */
2281 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2282 txop = BG_OP_RAW_MODE;
2283 rxop = BG_OP_RAW_MODE;
2284 }
2285 }
2288 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2289 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2290 else
2291 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

/* setup DISEED with the rest of the info */
2294 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2295 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2296
2297 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2298 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

/* Endianness conversion if necessary for DISEED */
2301 diseed->word2 = cpu_to_le32(diseed->word2);
2302 diseed->word3 = cpu_to_le32(diseed->word3);

/* advance sgl and increment sge count */
2305 num_sge++;
2306
2307 sgl++;
2308 j++;

/* setup the first BDE that points to protection buffer */
2311 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2312 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

/* must be integer multiple of the DIF size */
2315 BUG_ON(protgroup_len % 8);

/* Now setup DIF SGE */
2318 sgl->word2 = 0;
2319 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2320 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2321 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2322 sgl->word2 = cpu_to_le32(sgl->word2);
2323 sgl->sge_len = 0;
2324
2325 protgrp_blks = protgroup_len / 8;
2326 protgrp_bytes = protgrp_blks * blksize;

/* check if DIF SGE is crossing the 4K boundary; if so split */
2329 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2330 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2331 protgroup_offset += protgroup_remainder;
2332 protgrp_blks = protgroup_remainder / 8;
2333 protgrp_bytes = protgrp_blks * blksize;
2334 } else {
2335 protgroup_offset = 0;
2336 curr_prot++;
2337 }
2338
2339 num_sge++;

/* setup SGE's for data blocks associated with DIF data */
2342 pgdone = 0;
2343 subtotal = 0;
2344
2345 sgl++;
2346 j++;
2347
2348 while (!pgdone) {
/* Check to see if we ran out of space */
2350 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2351 !phba->cfg_xpsgl)
2352 return num_sge + 1;
2353
2354 if (!sgde) {
2355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2356 "9086 BLKGRD:%s Invalid data segment\n",
2357 __func__);
2358 return 0;
2359 }
2360
2361 if (!((j + 1) % phba->border_sge_num)) {
2362 sgl->word2 = 0;

/* set LSP type */
2365 bf_set(lpfc_sli4_sge_type, sgl,
2366 LPFC_SGE_TYPE_LSP);
2367
2368 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2369 lpfc_cmd);
2370
2371 if (unlikely(!sgl_xtra)) {
2372 goto out;
2373 } else {
2374 sgl->addr_lo = cpu_to_le32(
2375 putPaddrLow(sgl_xtra->dma_phys_sgl));
2376 sgl->addr_hi = cpu_to_le32(
2377 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2378 }
2379
2380 sgl->word2 = cpu_to_le32(sgl->word2);
2381 sgl->sge_len = cpu_to_le32(
2382 phba->cfg_sg_dma_buf_size);
2383
2384 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2385 } else {
2386 dataphysaddr = sg_dma_address(sgde) +
2387 split_offset;
2388
2389 remainder = sg_dma_len(sgde) - split_offset;
2390
2391 if ((subtotal + remainder) <= protgrp_bytes) {
/* we can use this whole buffer */
2393 dma_len = remainder;
2394 split_offset = 0;
2395
2396 if ((subtotal + remainder) ==
2397 protgrp_bytes)
2398 pgdone = 1;
2399 } else {
/*
 * must split this buffer with next
 * prot grp
 */
2403 dma_len = protgrp_bytes - subtotal;
2404 split_offset += dma_len;
2405 }
2406
2407 subtotal += dma_len;
2408
2409 sgl->word2 = 0;
2410 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2411 dataphysaddr));
2412 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2413 dataphysaddr));
2414 bf_set(lpfc_sli4_sge_last, sgl, 0);
2415 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2416 bf_set(lpfc_sli4_sge_type, sgl,
2417 LPFC_SGE_TYPE_DATA);
2418
2419 sgl->sge_len = cpu_to_le32(dma_len);
2420 dma_offset += dma_len;
2421
2422 num_sge++;
2423 curr_data++;
2424
2425 if (split_offset) {
2426 sgl++;
2427 j++;
2428 break;
2429 }

/* Move to the next s/g segment if possible */
2432 sgde = sg_next(sgde);
2433
2434 sgl++;
2435 }
2436
2437 j++;
2438 }
2439
2440 if (protgroup_offset) {
/* update the reference tag */
2442 reftag += protgrp_blks;
2443 continue;
2444 }

/* are we done ? */
2447 if (curr_prot == protcnt) {
/* mark the last SGL */
2449 sgl--;
2450 bf_set(lpfc_sli4_sge_last, sgl, 1);
2451 alldone = 1;
2452 } else if (curr_prot < protcnt) {
/* advance to next prot buffer */
2454 sgpe = sg_next(sgpe);

/* update the reference tag */
2457 reftag += protgrp_blks;
2458 } else {
/* if we're here, we have a bug */
2460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2461 "9085 BLKGRD: bug in %s\n", __func__);
2462 }
2463
2464 } while (!alldone);
2465
2466 out:
2467
2468 return num_sge;
2469 }

/**
 * lpfc_prot_group_type - Get prot group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine the composition of
 * protection groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 **/
2482 static int
2483 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2484 {
2485 int ret = LPFC_PG_TYPE_INVALID;
2486 unsigned char op = scsi_get_prot_op(sc);
2487
2488 switch (op) {
2489 case SCSI_PROT_READ_STRIP:
2490 case SCSI_PROT_WRITE_INSERT:
2491 ret = LPFC_PG_TYPE_NO_DIF;
2492 break;
2493 case SCSI_PROT_READ_INSERT:
2494 case SCSI_PROT_WRITE_STRIP:
2495 case SCSI_PROT_READ_PASS:
2496 case SCSI_PROT_WRITE_PASS:
2497 ret = LPFC_PG_TYPE_DIF_BUF;
2498 break;
2499 default:
2500 if (phba)
2501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2502 "9021 Unsupported protection op:%d\n",
2503 op);
2504 break;
2505 }
2506 return ret;
2507 }

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
2519 static int
2520 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2521 struct lpfc_io_buf *lpfc_cmd)
2522 {
2523 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2524 int fcpdl;
2525
2526 fcpdl = scsi_bufflen(sc);
2527
2528 /* Check if there is protection data on the wire */
2529 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2530 /* Read check for protection data */
2531 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2532 return fcpdl;
2533
2534 } else {
2535 /* Write check for protection data */
2536 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2537 return fcpdl;
2538 }
2539
2540 /*
2541  * If we are in DIF Type 1 mode every data block has a 8 byte
2542  * DIF (trailer) attached to it. Must adjust FCP data length
2543  * to account for the protection data.
2544  */
2545 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2546
2547 return fcpdl;
2548 }
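
/*
 * Worked example (illustrative only, added commentary): a 4096-byte IO
 * with a 512-byte protection interval that carries DIF on the wire gets
 *     fcpdl = 4096 + (4096 / 512) * 8 = 4096 + 64 = 4160
 * i.e. one 8-byte DIF trailer per 512-byte block.  For READ_INSERT and
 * WRITE_STRIP the wire carries no trailers (the HBA inserts/strips them
 * on the host side), so fcpdl stays at scsi_bufflen().
 */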
2549
2550 /**
2551  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2552  * @phba: The Hba for which this call is being executed.
2553  * @lpfc_cmd: The scsi buffer which is going to be mapped.
2554  *
2555  * This is the protection/DIF aware version of
2556  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2557  * two functions eventually, but for now, it's here.
2558  * RETURNS 0 - SUCCESS,
2559  *         1 - Failed DMA map, retry.
2560  *         2 - Invalid scsi cmd or prot_group_type
2561  **/
2562 static int
2563 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2564 struct lpfc_io_buf *lpfc_cmd)
2565 {
2566 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2567 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2568 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2569 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2570 uint32_t num_bde = 0;
2571 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2572 int prot_group_type = 0;
2573 int fcpdl;
2574 int ret = 1;
2575 struct lpfc_vport *vport = phba->pport;
2576
2577 /*
2578  * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2579  * fcp_rsp regions to the first data bde entry
2580  */
2581 bpl += 2;
2582 if (scsi_sg_count(scsi_cmnd)) {
2583 /*
2584  * The driver stores the segment count returned from dma_map_sg
2585  * because this is a count of dma-mappings used to map the use_sg
2586  * pages.  They are not guaranteed to be the same for those
2587  * architectures that implement an IOMMU.
2588  */
2589 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2590 scsi_sglist(scsi_cmnd),
2591 scsi_sg_count(scsi_cmnd), datadir);
2592 if (unlikely(!datasegcnt))
2593 return 1;
2594
2595 lpfc_cmd->seg_cnt = datasegcnt;
2596
2597 /* First check if data segment count from SCSI Layer is good */
2598 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2599 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2600 ret = 2;
2601 goto err;
2602 }
2603
2604 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2605
2606 switch (prot_group_type) {
2607 case LPFC_PG_TYPE_NO_DIF:
2608
2609 /* Here we need to add a PDE5 and PDE6 to the count */
2610 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2611 ret = 2;
2612 goto err;
2613 }
2614
2615 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2616 datasegcnt);
2617 /* we should have 2 or more entries in buffer list */
2618 if (num_bde < 2) {
2619 ret = 2;
2620 goto err;
2621 }
2622 break;
2623
2624 case LPFC_PG_TYPE_DIF_BUF:
2625 /*
2626  * This type indicates that protection buffers are
2627  * passed to the driver, so that needs to be prepared
2628  * for DMA
2629  */
2630 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2631 scsi_prot_sglist(scsi_cmnd),
2632 scsi_prot_sg_count(scsi_cmnd), datadir);
2633 if (unlikely(!protsegcnt)) {
2634 scsi_dma_unmap(scsi_cmnd);
2635 return 1;
2636 }
2637
2638 lpfc_cmd->prot_seg_cnt = protsegcnt;
2639
2640 /*
2641  * There is a minimum of 4 BPLs used for every
2642  * protection data segment.
2643  */
2644 if ((lpfc_cmd->prot_seg_cnt * 4) >
2645 (phba->cfg_total_seg_cnt - 2)) {
2646 ret = 2;
2647 goto err;
2648 }
2649
2650 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2651 datasegcnt, protsegcnt);
2652 /* we should have 3 or more entries in buffer list */
2653 if ((num_bde < 3) ||
2654 (num_bde > phba->cfg_total_seg_cnt)) {
2655 ret = 2;
2656 goto err;
2657 }
2658 break;
2659
2660 case LPFC_PG_TYPE_INVALID:
2661 default:
2662 scsi_dma_unmap(scsi_cmnd);
2663 lpfc_cmd->seg_cnt = 0;
2664
2665 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2666 "9022 Unexpected protection group %i\n",
2667 prot_group_type);
2668 return 2;
2669 }
2670 }
2671
2672 /*
2673  * Finish initializing those IOCB fields that are dependent on the
2674  * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2675  * reinitialized since all iocb memory resources are used multiple
2676  * times for sequence of this scsi cmnd.
2677  */
2678 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2679 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2680 iocb_cmd->ulpBdeCount = 1;
2681 iocb_cmd->ulpLe = 1;
2682
2683 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2684 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
2685
2686 /*
2687  * Due to difference in data length between DIF/non-DIF paths,
2688  * we need to set word 4 of IOCB here
2689  */
2690 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2691
2692 /*
2693  * For First burst, we may need to adjust the initial transfer
2694  * length for DIF
2695  */
2696 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2697 (fcpdl < vport->cfg_first_burst_size))
2698 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2699
2700 return 0;
2701 err:
2702 if (lpfc_cmd->seg_cnt)
2703 scsi_dma_unmap(scsi_cmnd);
2704 if (lpfc_cmd->prot_seg_cnt)
2705 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2706 scsi_prot_sg_count(scsi_cmnd),
2707 scsi_cmnd->sc_data_direction);
2708
2709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2710 "9023 Cannot setup S/G List for HBA"
2711 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2712 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2713 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2714 prot_group_type, num_bde);
2715
2716 lpfc_cmd->seg_cnt = 0;
2717 lpfc_cmd->prot_seg_cnt = 0;
2718 return ret;
2719 }
2720
2721 /*
2722  * This function calculates the T10 DIF guard tag
2723  * on the specified data using a CRC algorithm
2724  * provided by crc_t10dif.
2725  */
2726 static uint16_t
2727 lpfc_bg_crc(uint8_t *data, int count)
2728 {
2729 uint16_t crc = 0;
2730 uint16_t x;
2731
2732 crc = crc_t10dif(data, count);
2733 x = cpu_to_be16(crc);
2734 return x;
2735 }
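
/*
 * Added note (not in the original source): the computed CRC is returned
 * in big-endian (wire) order so that callers can compare it directly
 * against the raw guard_tag field of struct scsi_dif_tuple, which is
 * __be16, without a per-block byte swap.
 */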
2736
2737 /*
2738  * This function calculates the T10 DIF guard tag
2739  * on the specified data using a CSUM algorithm
2740  * provided by ip_compute_csum.
2741  */
2742 static uint16_t
2743 lpfc_bg_csum(uint8_t *data, int count)
2744 {
2745 uint16_t ret;
2746
2747 ret = ip_compute_csum(data, count);
2748 return ret;
2749 }
2750
2751 /*
2752  * This function examines the protection data to try to determine
2753  * what type of T10-DIF error occurred.
2754  */
2755 static void
2756 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2757 {
2758 struct scatterlist *sgpe;
2759 struct scatterlist *sgde;
2760 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2761 struct scsi_dif_tuple *src = NULL;
2762 uint8_t *data_src = NULL;
2763 uint16_t guard_tag;
2764 uint16_t start_app_tag, app_tag;
2765 uint32_t start_ref_tag, ref_tag;
2766 int prot, protsegcnt;
2767 int err_type, len, data_len;
2768 int chk_ref, chk_app, chk_guard;
2769 uint16_t sum;
2770 unsigned blksize;
2771
2772 err_type = BGS_GUARD_ERR_MASK;
2773 sum = 0;
2774 guard_tag = 0;
2775
2776 /* First check to see if there is protection data to examine */
2777 prot = scsi_get_prot_op(cmd);
2778 if ((prot == SCSI_PROT_READ_STRIP) ||
2779 (prot == SCSI_PROT_WRITE_INSERT) ||
2780 (prot == SCSI_PROT_NORMAL))
2781 goto out;
2782
2783 /* Currently the driver just supports ref_tag and guard_tag checking */
2784 chk_ref = 1;
2785 chk_app = 0;
2786 chk_guard = 0;
2787
2788 /* Setup a ptr to the protection data provided by the SCSI host */
2789 sgpe = scsi_prot_sglist(cmd);
2790 protsegcnt = lpfc_cmd->prot_seg_cnt;
2791
2792 if (sgpe && protsegcnt) {
2793
2794 /*
2795  * We will only try to verify guard tag if the segment
2796  * data length is a multiple of the blksize.
2797  */
2798 sgde = scsi_sglist(cmd);
2799 blksize = scsi_prot_interval(cmd);
2800 data_src = (uint8_t *)sg_virt(sgde);
2801 data_len = sgde->length;
2802 if ((data_len & (blksize - 1)) == 0)
2803 chk_guard = 1;
2804
2805 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2806 start_ref_tag = scsi_prot_ref_tag(cmd);
2807 if (start_ref_tag == LPFC_INVALID_REFTAG)
2808 goto out;
2809 start_app_tag = src->app_tag;
2810 len = sgpe->length;
2811 while (src && protsegcnt) {
2812 while (len) {
2813
2814 /*
2815  * First check to see if a protection data
2816  * check is valid
2817  */
2818 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2819 (src->app_tag == T10_PI_APP_ESCAPE)) {
2820 start_ref_tag++;
2821 goto skipit;
2822 }
2823
2824 /* First Guard Tag checking */
2825 if (chk_guard) {
2826 guard_tag = src->guard_tag;
2827 if (cmd->prot_flags
2828 & SCSI_PROT_IP_CHECKSUM)
2829 sum = lpfc_bg_csum(data_src,
2830 blksize);
2831 else
2832 sum = lpfc_bg_crc(data_src,
2833 blksize);
2834 if (guard_tag != sum) {
2835 err_type = BGS_GUARD_ERR_MASK;
2836 goto out;
2837 }
2838 }
2839
2840 /* Reference Tag checking */
2841 ref_tag = be32_to_cpu(src->ref_tag);
2842 if (chk_ref && (ref_tag != start_ref_tag)) {
2843 err_type = BGS_REFTAG_ERR_MASK;
2844 goto out;
2845 }
2846 start_ref_tag++;
2847
2848 /* App Tag checking */
2849 app_tag = src->app_tag;
2850 if (chk_app && (app_tag != start_app_tag)) {
2851 err_type = BGS_APPTAG_ERR_MASK;
2852 goto out;
2853 }
2854 skipit:
2855 len -= sizeof(struct scsi_dif_tuple);
2856 if (len < 0)
2857 len = 0;
2858 src++;
2859
2860 data_src += blksize;
2861 data_len -= blksize;
2862
2863 /*
2864  * Are we at the end of the Data segment?
2865  * The data segment is only used for Guard
2866  * tag checking.
2867  */
2868 if (chk_guard && (data_len == 0)) {
2869 chk_guard = 0;
2870 sgde = sg_next(sgde);
2871 if (!sgde)
2872 goto out;
2873
2874 data_src = (uint8_t *)sg_virt(sgde);
2875 data_len = sgde->length;
2876 if ((data_len & (blksize - 1)) == 0)
2877 chk_guard = 1;
2878 }
2879 }
2880
2881 /* Goto the next Protection data segment */
2882 sgpe = sg_next(sgpe);
2883 if (sgpe) {
2884 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2885 len = sgpe->length;
2886 } else {
2887 src = NULL;
2888 }
2889 protsegcnt--;
2890 }
2891 }
2892 out:
2893 if (err_type == BGS_GUARD_ERR_MASK) {
2894 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2895 set_host_byte(cmd, DID_ABORT);
2896 phba->bg_guard_err_cnt++;
2897 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2898 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2899 scsi_prot_ref_tag(cmd),
2900 sum, guard_tag);
2901
2902 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2903 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2904 set_host_byte(cmd, DID_ABORT);
2905
2906 phba->bg_reftag_err_cnt++;
2907 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2908 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2909 scsi_prot_ref_tag(cmd),
2910 ref_tag, start_ref_tag);
2911
2912 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2913 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2914 set_host_byte(cmd, DID_ABORT);
2915
2916 phba->bg_apptag_err_cnt++;
2917 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2918 "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2919 scsi_prot_ref_tag(cmd),
2920 app_tag, start_app_tag);
2921 }
2922 }
2923
2924 /**
2925  * lpfc_parse_bg_err - Determine if we hit a protection error
2926  * @phba: The Hba for which this call is being executed.
2927  * @lpfc_cmd: The scsi cmnd buffer for which this call is being executed.
2928  * @pIocbOut: The Response IOCB for the command.
2929  *
2930  * This function checks for BlockGuard errors detected by the HBA.  In case
2931  * of errors, the ASC/ASCQ fields in the sense buffer will be set
2932  * accordingly, paired with ILLEGAL_REQUEST to signal detection of the
2933  * failure.
2934  * Returns: 0 in case of success or 1 in case of error.
2935  **/
2936 static int
2937 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2938 struct lpfc_iocbq *pIocbOut)
2939 {
2940 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2941 struct sli3_bg_fields *bgf;
2942 int ret = 0;
2943 struct lpfc_wcqe_complete *wcqe;
2944 u32 status;
2945 u32 bghm = 0;
2946 u32 bgstat = 0;
2947 u64 failing_sector = 0;
2948
2949 if (phba->sli_rev == LPFC_SLI_REV4) {
2950 wcqe = &pIocbOut->wcqe_cmpl;
2951 status = bf_get(lpfc_wcqe_c_status, wcqe);
2952
2953 if (status == CQE_STATUS_DI_ERROR) {
2954 /* Guard Check failed */
2955 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
2956 bgstat |= BGS_GUARD_ERR_MASK;
2957
2958 /* AppTag Check failed */
2959 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
2960 bgstat |= BGS_APPTAG_ERR_MASK;
2961
2962 /* RefTag Check failed */
2963 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
2964 bgstat |= BGS_REFTAG_ERR_MASK;
2965
2966 /* Check to see if there was any good data before the
2967  * error
2968  */
2969 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2970 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2971 bghm = wcqe->total_data_placed;
2972 }
2973
2974 /*
2975  * Set ALL the error bits to indicate we don't know what
2976  * type of error it is.
2977  */
2978 if (!bgstat)
2979 bgstat |= (BGS_REFTAG_ERR_MASK |
2980 BGS_APPTAG_ERR_MASK |
2981 BGS_GUARD_ERR_MASK);
2982 }
2983
2984 } else {
2985 bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2986 bghm = bgf->bghm;
2987 bgstat = bgf->bgstat;
2988 }
2989
2990 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2991 cmd->result = DID_ERROR << 16;
2992 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2993 "9072 BLKGRD: Invalid BG Profile in cmd "
2994 "0x%x reftag 0x%x blk cnt 0x%x "
2995 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2996 scsi_prot_ref_tag(cmd),
2997 scsi_logical_block_count(cmd), bgstat, bghm);
2998 ret = (-1);
2999 goto out;
3000 }
3001
3002 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3003 cmd->result = DID_ERROR << 16;
3004 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3005 "9073 BLKGRD: Invalid BG PDIF Block in cmd "
3006 "0x%x reftag 0x%x blk cnt 0x%x "
3007 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3008 scsi_prot_ref_tag(cmd),
3009 scsi_logical_block_count(cmd), bgstat, bghm);
3010 ret = (-1);
3011 goto out;
3012 }
3013
3014 if (lpfc_bgs_get_guard_err(bgstat)) {
3015 ret = 1;
3016 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3017 set_host_byte(cmd, DID_ABORT);
3018 phba->bg_guard_err_cnt++;
3019 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3020 "9055 BLKGRD: Guard Tag error in cmd "
3021 "0x%x reftag 0x%x blk cnt 0x%x "
3022 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3023 scsi_prot_ref_tag(cmd),
3024 scsi_logical_block_count(cmd), bgstat, bghm);
3025 }
3026
3027 if (lpfc_bgs_get_reftag_err(bgstat)) {
3028 ret = 1;
3029 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3030 set_host_byte(cmd, DID_ABORT);
3031 phba->bg_reftag_err_cnt++;
3032 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3033 "9056 BLKGRD: Ref Tag error in cmd "
3034 "0x%x reftag 0x%x blk cnt 0x%x "
3035 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3036 scsi_prot_ref_tag(cmd),
3037 scsi_logical_block_count(cmd), bgstat, bghm);
3038 }
3039
3040 if (lpfc_bgs_get_apptag_err(bgstat)) {
3041 ret = 1;
3042 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3043 set_host_byte(cmd, DID_ABORT);
3044 phba->bg_apptag_err_cnt++;
3045 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3046 "9061 BLKGRD: App Tag error in cmd "
3047 "0x%x reftag 0x%x blk cnt 0x%x "
3048 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3049 scsi_prot_ref_tag(cmd),
3050 scsi_logical_block_count(cmd), bgstat, bghm);
3051 }
3052
3053 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3054 /*
3055  * setup sense data descriptor 0 per SPC-4 as an information
3056  * field, and put the failing LBA in it.
3057  * This code assumes there was also a guard/app/ref tag error
3058  * indication.
3059  */
3060 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3061 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3062 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3063 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3064
3065 /* bghm is an on-the-wire byte count; convert it to a block count */
3066 switch (scsi_get_prot_op(cmd)) {
3067 case SCSI_PROT_READ_INSERT:
3068 case SCSI_PROT_WRITE_STRIP:
3069 bghm /= cmd->device->sector_size;
3070 break;
3071 case SCSI_PROT_READ_STRIP:
3072 case SCSI_PROT_WRITE_INSERT:
3073 case SCSI_PROT_READ_PASS:
3074 case SCSI_PROT_WRITE_PASS:
3075 bghm /= (cmd->device->sector_size +
3076 sizeof(struct scsi_dif_tuple));
3077 break;
3078 }
3079
3080 failing_sector = scsi_get_lba(cmd);
3081 failing_sector += bghm;
3082
3083 /* Descriptor Information */
3084 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3085 }
3086
3087 if (!ret) {
3088 /* No error was reported - problem in FW? */
3089 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3090 "9057 BLKGRD: Unknown error in cmd "
3091 "0x%x reftag 0x%x blk cnt 0x%x "
3092 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3093 scsi_prot_ref_tag(cmd),
3094 scsi_logical_block_count(cmd), bgstat, bghm);
3095
3096 /* Calculate what type of error it was */
3097 lpfc_calc_bg_err(phba, lpfc_cmd);
3098 }
3099 out:
3100 return ret;
3101 }
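
/*
 * Worked example (illustrative only, added commentary): for a PASS-type
 * op on 512-byte sectors, each block occupies 512 + 8 bytes on the wire.
 * If the HBA placed bghm = 2080 bytes before detecting the error,
 * 2080 / 520 = 4 blocks were good, so the failing sector reported in the
 * sense data is scsi_get_lba(cmd) + 4.
 */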
3102
3103 /**
3104  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3105  * @phba: The Hba for which this call is being executed.
3106  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3107  *
3108  * This routine does the pci dma mapping for scatter-gather list of scsi
3109  * cmnd field of @lpfc_cmd for device with SLI-4 interface spec.
3110  *
3111  * Return codes:
3112  *	2 - Error - Do not retry
3113  *	1 - Error - Retry with reduced sg list
3114  *	0 - Success
3115  **/
3116 static int
3117 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3118 {
3119 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3120 struct scatterlist *sgel = NULL;
3121 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3122 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3123 struct sli4_sge *first_data_sgl;
3124 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3125 struct lpfc_vport *vport = phba->pport;
3126 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3127 dma_addr_t physaddr;
3128 uint32_t dma_len;
3129 uint32_t dma_offset = 0;
3130 int nseg, i, j;
3131 struct ulp_bde64 *bde;
3132 bool lsp_just_set = false;
3133 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3134
3135 /*
3136  * There are three possibilities here - use scatter-gather segment, use
3137  * the single mapping, or neither.  Start the lpfc command prep by
3138  * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
3139  * data sge entry.
3140  */
3141 if (scsi_sg_count(scsi_cmnd)) {
3142
3143 /*
3144  * The driver stores the segment count returned from dma_map_sg
3145  * because this is a count of dma-mappings used to map the use_sg
3146  * pages.  They are not guaranteed to be the same for those
3147  * architectures that implement an IOMMU.
3148  */
3149 nseg = scsi_dma_map(scsi_cmnd);
3150 if (unlikely(nseg <= 0))
3151 return 1;
3152 sgl += 1;
3153 /* clear the last flag in the fcp_rsp map entry */
3154 sgl->word2 = le32_to_cpu(sgl->word2);
3155 bf_set(lpfc_sli4_sge_last, sgl, 0);
3156 sgl->word2 = cpu_to_le32(sgl->word2);
3157 sgl += 1;
3158 first_data_sgl = sgl;
3159 lpfc_cmd->seg_cnt = nseg;
3160 if (!phba->cfg_xpsgl &&
3161 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3162 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3163 "9074 BLKGRD:"
3164 " %s: Too many sg segments from "
3165 "dma_map_sg. Config %d, seg_cnt %d\n",
3166 __func__, phba->cfg_sg_seg_cnt,
3167 lpfc_cmd->seg_cnt);
3168 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3169 lpfc_cmd->seg_cnt = 0;
3170 scsi_dma_unmap(scsi_cmnd);
3171 return 2;
3172 }
3173
3174 /*
3175  * The driver established a maximum scatter-gather segment count
3176  * during probe that limits the number of sg elements in any
3177  * single scsi command.  Just run through the seg_cnt and format
3178  * the sge's.
3179  * When an LSP boundary is reached the driver chains to an
3180  * additional SGL page rather than adding more entries to the
3181  * current one.
3182  */
3183
3184
3185 sgel = scsi_sglist(scsi_cmnd);
3186 j = 2;
3187 for (i = 0; i < nseg; i++) {
3188 sgl->word2 = 0;
3189 if (nseg == 1) {
3190 bf_set(lpfc_sli4_sge_last, sgl, 1);
3191 bf_set(lpfc_sli4_sge_type, sgl,
3192 LPFC_SGE_TYPE_DATA);
3193 } else {
3194 bf_set(lpfc_sli4_sge_last, sgl, 0);
3195
3196 /* do we need to expand the segment */
3197 if (!lsp_just_set &&
3198 !((j + 1) % phba->border_sge_num) &&
3199 ((nseg - 1) != i)) {
3200 /* set LSP type */
3201 bf_set(lpfc_sli4_sge_type, sgl,
3202 LPFC_SGE_TYPE_LSP);
3203
3204 sgl_xtra = lpfc_get_sgl_per_hdwq(
3205 phba, lpfc_cmd);
3206
3207 if (unlikely(!sgl_xtra)) {
3208 lpfc_cmd->seg_cnt = 0;
3209 scsi_dma_unmap(scsi_cmnd);
3210 return 1;
3211 }
3212 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3213 sgl_xtra->dma_phys_sgl));
3214 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3215 sgl_xtra->dma_phys_sgl));
3216
3217 } else {
3218 bf_set(lpfc_sli4_sge_type, sgl,
3219 LPFC_SGE_TYPE_DATA);
3220 }
3221 }
3222
3223 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3224 LPFC_SGE_TYPE_LSP)) {
3225 if ((nseg - 1) == i)
3226 bf_set(lpfc_sli4_sge_last, sgl, 1);
3227
3228 physaddr = sg_dma_address(sgel);
3229 dma_len = sg_dma_len(sgel);
3230 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3231 physaddr));
3232 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3233 physaddr));
3234
3235 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3236 sgl->word2 = cpu_to_le32(sgl->word2);
3237 sgl->sge_len = cpu_to_le32(dma_len);
3238
3239 dma_offset += dma_len;
3240 sgel = sg_next(sgel);
3241
3242 sgl++;
3243 lsp_just_set = false;
3244
3245 } else {
3246 sgl->word2 = cpu_to_le32(sgl->word2);
3247 sgl->sge_len = cpu_to_le32(
3248 phba->cfg_sg_dma_buf_size);
3249
3250 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3251 i = i - 1;
3252
3253 lsp_just_set = true;
3254 }
3255
3256 j++;
3257 }
3258
3259 /* Setup the first Payload BDE. For FCoE we just key off
3260  * Performance Hints, for FC we use lpfc_enable_pbde.
3261  * We populate words 13-15 of IOCB/WQE.
3262  */
3263 if (nseg == 1 &&
3264 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3265 phba->cfg_enable_pbde)) {
3266 /* Words 13-15 */
3267 bde = (struct ulp_bde64 *)
3268 &wqe->words[13];
3269 bde->addrLow = first_data_sgl->addr_lo;
3270 bde->addrHigh = first_data_sgl->addr_hi;
3271 bde->tus.f.bdeSize =
3272 le32_to_cpu(first_data_sgl->sge_len);
3273 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3274 bde->tus.w = cpu_to_le32(bde->tus.w);
3275
3276 /* Word 11 - set PBDE bit */
3277 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3278 } else {
3279 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3280 /* Word 11 - PBDE bit disabled by default template */
3281 }
3282 } else {
3283 sgl += 1;
3284 /* clear the last flag in the fcp_rsp map entry */
3285 sgl->word2 = le32_to_cpu(sgl->word2);
3286 bf_set(lpfc_sli4_sge_last, sgl, 1);
3287 sgl->word2 = cpu_to_le32(sgl->word2);
3288
3289 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3290 phba->cfg_enable_pbde) {
3291 bde = (struct ulp_bde64 *)
3292 &wqe->words[13];
3293 memset(bde, 0, (sizeof(uint32_t) * 3));
3294 }
3295 }
3296
3297 /*
3298  * Finish initializing those WQE fields that are dependent on the
3299  * scsi_cmnd request_buffer.  Note that all WQE memory resources
3300  * are reused across commands, so fcpDl is set explicitly here.
3301  */
3302
3303 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3304 /* Set first-burst provided it was successfully negotiated */
3305 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3306 vport->cfg_first_burst_size &&
3307 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3308 u32 init_len, total_len;
3309
3310 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3311 init_len = min(total_len, vport->cfg_first_burst_size);
3312
3313 /* Word 4 & 5 */
3314 wqe->fcp_iwrite.initial_xfer_len = init_len;
3315 wqe->fcp_iwrite.total_xfer_len = total_len;
3316 } else {
3317 /* Word 4 */
3318 wqe->fcp_iwrite.total_xfer_len =
3319 be32_to_cpu(fcp_cmnd->fcpDl);
3320 }
3321
3322 /*
3323  * If the OAS driver feature is enabled and the lun is enabled for
3324  * OAS, set the oas iocb related flags.
3325  */
3326 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3327 scsi_cmnd->device->hostdata)->oas_enabled) {
3328 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3329 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3330 scsi_cmnd->device->hostdata)->priority;
3331
3332 /* Word 10 */
3333 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3334 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3335
3336 if (lpfc_cmd->cur_iocbq.priority)
3337 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3338 (lpfc_cmd->cur_iocbq.priority << 1));
3339 else
3340 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3341 (phba->cfg_XLanePriority << 1));
3342 }
3343
3344 return 0;
3345 }
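
/*
 * Added note (not in the original source): the SGL built above is laid
 * out as entry 0 = FCP_CMND, entry 1 = FCP_RSP, with data SGEs starting
 * at index 2 (hence j = 2 in the loop).  When xpsgl is enabled, every
 * border_sge_num-th slot becomes an LSP SGE that chains to an extra SGL
 * page obtained from lpfc_get_sgl_per_hdwq(), and the data SGE that
 * would have gone there is emitted on the new page instead (i is
 * decremented to retry the segment).
 */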
3346
3347 /**
3348  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3349  * @phba: The Hba for which this call is being executed.
3350  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3351  *
3352  * This is the protection/DIF aware version of
3353  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3354  * two functions eventually, but for now, it's here.
3355  * Return codes:
3356  *	2 - Error - Do not retry
3357  *	1 - Error - Retry with reduced sg list
3358  *	0 - Success
3359  **/
3360 static int
3361 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3362 struct lpfc_io_buf *lpfc_cmd)
3363 {
3364 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3365 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3366 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3367 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3368 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3369 uint32_t num_sge = 0;
3370 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3371 int prot_group_type = 0;
3372 int fcpdl;
3373 int ret = 1;
3374 struct lpfc_vport *vport = phba->pport;
3375
3376 /*
3377  * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3378  * fcp_rsp regions to the first data sge entry
3379  */
3380 if (scsi_sg_count(scsi_cmnd)) {
3381 /*
3382  * The driver stores the segment count returned from dma_map_sg
3383  * because this is a count of dma-mappings used to map the use_sg
3384  * pages.  They are not guaranteed to be the same for those
3385  * architectures that implement an IOMMU.
3386  */
3387 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3388 scsi_sglist(scsi_cmnd),
3389 scsi_sg_count(scsi_cmnd), datadir);
3390 if (unlikely(!datasegcnt))
3391 return 1;
3392
3393 sgl += 1;
3394 /* clear the last flag in the fcp_rsp map entry */
3395 sgl->word2 = le32_to_cpu(sgl->word2);
3396 bf_set(lpfc_sli4_sge_last, sgl, 0);
3397 sgl->word2 = cpu_to_le32(sgl->word2);
3398
3399 sgl += 1;
3400 lpfc_cmd->seg_cnt = datasegcnt;
3401
3402 /* First check if data segment count from SCSI Layer is good */
3403 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3404 !phba->cfg_xpsgl) {
3405 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3406 ret = 2;
3407 goto err;
3408 }
3409
3410 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3411
3412 switch (prot_group_type) {
3413 case LPFC_PG_TYPE_NO_DIF:
3414 /* Here we need to add a DISEED to the count */
3415 if (((lpfc_cmd->seg_cnt + 1) >
3416 phba->cfg_total_seg_cnt) &&
3417 !phba->cfg_xpsgl) {
3418 ret = 2;
3419 goto err;
3420 }
3421
3422 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3423 datasegcnt, lpfc_cmd);
3424
3425 /* we should have 2 or more entries in buffer list */
3426 if (num_sge < 2) {
3427 ret = 2;
3428 goto err;
3429 }
3430 break;
3431
3432 case LPFC_PG_TYPE_DIF_BUF:
3433 /*
3434  * This type indicates that protection buffers are
3435  * passed to the driver, so that needs to be prepared
3436  * for DMA
3437  */
3438 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3439 scsi_prot_sglist(scsi_cmnd),
3440 scsi_prot_sg_count(scsi_cmnd), datadir);
3441 if (unlikely(!protsegcnt)) {
3442 scsi_dma_unmap(scsi_cmnd);
3443 return 1;
3444 }
3445
3446 lpfc_cmd->prot_seg_cnt = protsegcnt;
3447
3448 /*
3449  * There is a minimum of 3 SGEs used for every protection data segment.
3450  */
3451 if (((lpfc_cmd->prot_seg_cnt * 3) >
3452 (phba->cfg_total_seg_cnt - 2)) &&
3453 !phba->cfg_xpsgl) {
3454 ret = 2;
3455 goto err;
3456 }
3457
3458 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3459 datasegcnt, protsegcnt, lpfc_cmd);
3460
3461 /* we should have 3 or more entries in buffer list */
3462 if (num_sge < 3 ||
3463 (num_sge > phba->cfg_total_seg_cnt &&
3464 !phba->cfg_xpsgl)) {
3465 ret = 2;
3466 goto err;
3467 }
3468 break;
3469
3470 case LPFC_PG_TYPE_INVALID:
3471 default:
3472 scsi_dma_unmap(scsi_cmnd);
3473 lpfc_cmd->seg_cnt = 0;
3474
3475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3476 "9083 Unexpected protection group %i\n",
3477 prot_group_type);
3478 return 2;
3479 }
3480 }
3481
3482 switch (scsi_get_prot_op(scsi_cmnd)) {
3483 case SCSI_PROT_WRITE_STRIP:
3484 case SCSI_PROT_READ_STRIP:
3485 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
3486 break;
3487 case SCSI_PROT_WRITE_INSERT:
3488 case SCSI_PROT_READ_INSERT:
3489 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
3490 break;
3491 case SCSI_PROT_WRITE_PASS:
3492 case SCSI_PROT_READ_PASS:
3493 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
3494 break;
3495 }
3496
3497 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3498 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3499
3500 /* Set first-burst provided it was successfully negotiated */
3501 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3502 vport->cfg_first_burst_size &&
3503 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3504 u32 init_len, total_len;
3505
3506 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3507 init_len = min(total_len, vport->cfg_first_burst_size);
3508
3509 /* Word 4 & 5 */
3510 wqe->fcp_iwrite.initial_xfer_len = init_len;
3511 wqe->fcp_iwrite.total_xfer_len = total_len;
3512 } else {
3513 /* Word 4 */
3514 wqe->fcp_iwrite.total_xfer_len =
3515 be32_to_cpu(fcp_cmnd->fcpDl);
3516 }
3517
3518 /*
3519  * If the OAS driver feature is enabled and the lun is enabled for
3520  * OAS, set the oas iocb related flags.
3521  */
3522 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3523 scsi_cmnd->device->hostdata)->oas_enabled) {
3524 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3525
3526 /* Word 10 */
3527 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3528 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3529 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3530 (phba->cfg_XLanePriority << 1));
3531 }
3532
3533 /* Word 7. DIF Flags */
3534 if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
3535 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3536 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
3537 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3538 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
3539 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3540
3541 lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
3542 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3543
3544 return 0;
3545 err:
3546 if (lpfc_cmd->seg_cnt)
3547 scsi_dma_unmap(scsi_cmnd);
3548 if (lpfc_cmd->prot_seg_cnt)
3549 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3550 scsi_prot_sg_count(scsi_cmnd),
3551 scsi_cmnd->sc_data_direction);
3552
3553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3554 "9084 Cannot setup S/G List for HBA"
3555 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3556 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3557 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3558 prot_group_type, num_sge);
3559
3560 lpfc_cmd->seg_cnt = 0;
3561 lpfc_cmd->prot_seg_cnt = 0;
3562 return ret;
3563 }
3564
3565 /**
3566  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3567  * @phba: The Hba for which this call is being executed.
3568  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3569  *
3570  * This routine wraps the actual DMA mapping function pointer from the
3571  * lpfc_hba struct.
3572  *
3573  * Return codes:
3574  *	1 - Error
3575  *	0 - Success
3576  **/
3577 static inline int
3578 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3579 {
3580 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3581 }
3582
3583
3584 /**
3585  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3586  * @phba: The Hba for which this call is being executed.
3587  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3588  *
3589  * This routine wraps the actual protection-aware DMA mapping function
3590  * pointer from the lpfc_hba struct.
3591  *
3592  * Return codes:
3593  *	1 - Error
3594  *	0 - Success
3595  **/
3596 static inline int
3597 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3598 {
3599 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3600 }
3601
3602 /**
3603  * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3604  * buffer
3605  * @vport: Pointer to vport object.
3606  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3607  * @tmo: Timeout value for the IO.
3608  *
3609  * This routine initializes the IOCB/WQE data structure from the scsi command.
3610  *
3611  * Return codes:
3612  *	1 - Error
3613  *	0 - Success
3614  **/
3615 static inline int
3616 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3617 uint8_t tmo)
3618 {
3619 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3620 }
3621
3622 /**
3623  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3624  * @phba: Pointer to hba context object.
3625  * @vport: Pointer to vport object.
3626  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3627  * @fcpi_parm: FCP Initiator parameter.
3628  *
3629  * This function posts an event when there is a SCSI command reporting
3630  * error from the scsi device.
3631  **/
3632 static void
3633 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3634 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3635 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3636 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3637 uint32_t resp_info = fcprsp->rspStatus2;
3638 uint32_t scsi_status = fcprsp->rspStatus3;
3639 struct lpfc_fast_path_event *fast_path_evt = NULL;
3640 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3641 unsigned long flags;
3642
3643 if (!pnode)
3644 return;
3645
3646 /* If there is queuefull or busy condition send a scsi event */
3647 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3648 (cmnd->result == SAM_STAT_BUSY)) {
3649 fast_path_evt = lpfc_alloc_fast_evt(phba);
3650 if (!fast_path_evt)
3651 return;
3652 fast_path_evt->un.scsi_evt.event_type =
3653 FC_REG_SCSI_EVENT;
3654 fast_path_evt->un.scsi_evt.subcategory =
3655 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3656 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3657 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3658 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3659 &pnode->nlp_portname, sizeof(struct lpfc_name));
3660 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3661 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3662 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3663 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3664 fast_path_evt = lpfc_alloc_fast_evt(phba);
3665 if (!fast_path_evt)
3666 return;
3667 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3668 FC_REG_SCSI_EVENT;
3669 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3670 LPFC_EVENT_CHECK_COND;
3671 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3672 cmnd->device->lun;
3673 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3674 &pnode->nlp_portname, sizeof(struct lpfc_name));
3675 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3676 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3677 fast_path_evt->un.check_cond_evt.sense_key =
3678 cmnd->sense_buffer[2] & 0xf;
3679 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3680 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3681 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3682 fcpi_parm &&
3683 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3684 ((scsi_status == SAM_STAT_GOOD) &&
3685 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3686 /*
3687  * If there is an under run, check if under run reported by
3688  * storage array is same as the under run reported by HBA.
3689  * If this is not same, there is a dropped frame.
3690 fast_path_evt = lpfc_alloc_fast_evt(phba);
3691 if (!fast_path_evt)
3692 return;
3693 fast_path_evt->un.read_check_error.header.event_type =
3694 FC_REG_FABRIC_EVENT;
3695 fast_path_evt->un.read_check_error.header.subcategory =
3696 LPFC_EVENT_FCPRDCHKERR;
3697 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3698 &pnode->nlp_portname, sizeof(struct lpfc_name));
3699 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3700 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3701 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3702 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3703 fast_path_evt->un.read_check_error.fcpiparam =
3704 fcpi_parm;
3705 } else
3706 return;
3707
3708 fast_path_evt->vport = vport;
3709 spin_lock_irqsave(&phba->hbalock, flags);
3710 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3711 spin_unlock_irqrestore(&phba->hbalock, flags);
3712 lpfc_worker_wake_up(phba);
3713 return;
3714 }
3715
3716 /**
3717  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3718  * @phba: The HBA for which this call is being executed.
3719  * @psb: The scsi buffer which is going to be un-mapped.
3720  *
3721  * This routine does DMA un-mapping of scatter gather list of scsi command
3722  * field of @psb.
3723  **/
3724 static void
3725 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3726 {
3727 /*
3728  * There are only two special cases to consider.  (1) the scsi command
3729  * requested scatter-gather usage or (2) the scsi command allocated
3730  * a request buffer, but did not request use_sg.  There is a third
3731  * case, but it does not require resource deallocation.
3732  */
3733 if (psb->seg_cnt > 0)
3734 scsi_dma_unmap(psb->pCmd);
3735 if (psb->prot_seg_cnt > 0)
3736 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3737 scsi_prot_sg_count(psb->pCmd),
3738 psb->pCmd->sc_data_direction);
3739 }
3740
3741 /**
3742  * lpfc_unblock_requests - allow further commands to be queued.
3743  * @phba: pointer to phba object
3744  *
3745  * For single vport, just call scsi_unblock_requests on physical port.
3746  * For multiple vports, send scsi_unblock_requests for all the vports.
3747  */
3748 void
3749 lpfc_unblock_requests(struct lpfc_hba *phba)
3750 {
3751 struct lpfc_vport **vports;
3752 struct Scsi_Host *shost;
3753 int i;
3754
3755 if (phba->sli_rev == LPFC_SLI_REV4 &&
3756 !phba->sli4_hba.max_cfg_param.vpi_used) {
3757 shost = lpfc_shost_from_vport(phba->pport);
3758 scsi_unblock_requests(shost);
3759 return;
3760 }
3761
3762 vports = lpfc_create_vport_work_array(phba);
3763 if (vports != NULL)
3764 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3765 shost = lpfc_shost_from_vport(vports[i]);
3766 scsi_unblock_requests(shost);
3767 }
3768 lpfc_destroy_vport_work_array(phba, vports);
3769 }
3770
3771 /**
3772  * lpfc_block_requests - prevent further commands from being queued.
3773  * @phba: pointer to phba object
3774  *
3775  * For single vport, just call scsi_block_requests on physical port.
3776  * For multiple vports, send scsi_block_requests for all the vports.
3777  */
3778 void
3779 lpfc_block_requests(struct lpfc_hba *phba)
3780 {
3781 struct lpfc_vport **vports;
3782 struct Scsi_Host *shost;
3783 int i;
3784
3785 if (atomic_read(&phba->cmf_stop_io))
3786 return;
3787
3788 if (phba->sli_rev == LPFC_SLI_REV4 &&
3789 !phba->sli4_hba.max_cfg_param.vpi_used) {
3790 shost = lpfc_shost_from_vport(phba->pport);
3791 scsi_block_requests(shost);
3792 return;
3793 }
3794
3795 vports = lpfc_create_vport_work_array(phba);
3796 if (vports != NULL)
3797 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3798 shost = lpfc_shost_from_vport(vports[i]);
3799 scsi_block_requests(shost);
3800 }
3801 lpfc_destroy_vport_work_array(phba, vports);
3802 }
3803
3804 /**
3805  * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3806  * @phba: The HBA for which this call is being executed.
3807  * @time: The latency of the IO that completed (in ns)
3808  * @size: The size of the IO that completed
3809  * @shost: SCSI host the IO completed on (NULL for a NVME IO)
3810  *
3811  * The routine adjusts the various Burst and Bandwidth counters used in
3812  * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3813  * that means the IO was never issued to the HBA, so this routine is
3814  * just being called to cleanup the counter from a previous
3815  * lpfc_update_cmf_cmd call.
3816  */
3817 int
3818 lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3819 uint64_t time, uint32_t size, struct Scsi_Host *shost)
3820 {
3821 struct lpfc_cgn_stat *cgs;
3822
3823 if (time != LPFC_CGN_NOT_SENT) {
3824 /* lat is ns coming in, save latency in us */
3825 if (time < 1000)
3826 time = 1;
3827 else
3828 time = div_u64(time + 500, 1000); /* round it */
3829
3830 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3831 atomic64_add(size, &cgs->rcv_bytes);
3832 atomic64_add(time, &cgs->rx_latency);
3833 atomic_inc(&cgs->rx_io_cnt);
3834 }
3835 return 0;
3836 }
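
/*
 * Worked example (illustrative only, added commentary): latencies arrive
 * in nanoseconds and are stored in microseconds.  time = 2600 ns becomes
 * (2600 + 500) / 1000 = 3 us (rounded to nearest), while anything under
 * 1000 ns is clamped to 1 us so short IOs still register in rx_latency.
 */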
3837
3838 /**
3839  * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3840  * @phba: The HBA for which this call is being executed.
3841  * @size: The size of the IO that will be issued
3842  *
3843  * The routine adjusts the various Burst and Bandwidth counters used in
3844  * Congestion management and E2E.
3845  */
3846 int
3847 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3848 {
3849 uint64_t total;
3850 struct lpfc_cgn_stat *cgs;
3851 int cpu;
3852
3853 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
3854 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3855 phba->cmf_max_bytes_per_interval) {
3856 total = 0;
3857 for_each_present_cpu(cpu) {
3858 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3859 total += atomic64_read(&cgs->total_bytes);
3860 }
3861 if (total >= phba->cmf_max_bytes_per_interval) {
3862 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3863 lpfc_block_requests(phba);
3864 phba->cmf_last_ts =
3865 lpfc_calc_cmf_latency(phba);
3866 }
3867 atomic_inc(&phba->cmf_busy);
3868 return -EBUSY;
3869 }
3870 if (size > atomic_read(&phba->rx_max_read_cnt))
3871 atomic_set(&phba->rx_max_read_cnt, size);
3872 }
3873
3874 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3875 atomic64_add(size, &cgs->total_bytes);
3876 return 0;
3877 }
3878
3879 /**
3880  * lpfc_handle_fcp_err - FCP response handler
3881  * @vport: The virtual port for which this call is being executed.
3882  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3883  * @fcpi_parm: FCP Initiator parameter.
3884  *
3885  * This routine is called to process response IOCB with status field
3886  * IOSTAT_FCP_RSP_ERROR. This routine processes FCP_RSP
3887  * based upon SCSI and Host status.
3888  **/
3889 static void
3890 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3891 uint32_t fcpi_parm)
3892 {
3893 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3894 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3895 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3896 uint32_t resp_info = fcprsp->rspStatus2;
3897 uint32_t scsi_status = fcprsp->rspStatus3;
3898 uint32_t *lp;
3899 uint32_t host_status = DID_OK;
3900 uint32_t rsplen = 0;
3901 uint32_t fcpDl;
3902 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3903
3904
3905 /*
3906  * If this is a task management command, there is no
3907  * scsi packet associated with this lpfc_cmd.  The driver
3908  * consumes it.
3909  */
3910 if (fcpcmd->fcpCntl2) {
3911 scsi_status = 0;
3912 goto out;
3913 }
3914
3915 if (resp_info & RSP_LEN_VALID) {
3916 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3917 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3918 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3919 "2719 Invalid response length: "
3920 "tgt x%x lun x%llx cmnd x%x rsplen "
3921 "x%x\n", cmnd->device->id,
3922 cmnd->device->lun, cmnd->cmnd[0],
3923 rsplen);
3924 host_status = DID_ERROR;
3925 goto out;
3926 }
3927 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3928 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3929 "2757 Protocol failure detected during "
3930 "processing of FCP I/O op: "
3931 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3932 cmnd->device->id,
3933 cmnd->device->lun, cmnd->cmnd[0],
3934 fcprsp->rspInfo3);
3935 host_status = DID_ERROR;
3936 goto out;
3937 }
3938 }
3939
3940 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3941 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3942 if (snslen > SCSI_SENSE_BUFFERSIZE)
3943 snslen = SCSI_SENSE_BUFFERSIZE;
3944
3945 if (resp_info & RSP_LEN_VALID)
3946 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3947 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3948 }
3949 lp = (uint32_t *)cmnd->sense_buffer;
3950
3951 /* special handling for under run conditions */
3952 if (!scsi_status && (resp_info & RESID_UNDER)) {
3953 /* don't log under runs if fcp set... */
3954 if (vport->cfg_log_verbose & LOG_FCP)
3955 logit = LOG_FCP_ERROR;
3956 /* unless operator says so */
3957 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3958 logit = LOG_FCP_UNDER;
3959 }
3960
3961 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3962 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3963 "Data: x%x x%x x%x x%x x%x\n",
3964 cmnd->cmnd[0], scsi_status,
3965 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3966 be32_to_cpu(fcprsp->rspResId),
3967 be32_to_cpu(fcprsp->rspSnsLen),
3968 be32_to_cpu(fcprsp->rspRspLen),
3969 fcprsp->rspInfo3);
3970
3971 scsi_set_resid(cmnd, 0);
3972 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3973 if (resp_info & RESID_UNDER) {
3974 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3975
3976 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3977 "9025 FCP Underrun, expected %d, "
3978 "residual %d Data: x%x x%x x%x\n",
3979 fcpDl,
3980 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3981 cmnd->underflow);
3982
3983 /*
3984  * If there is an under run check if under run reported by
3985  * storage array is same as the under run reported by HBA.
3986  * If this is not same, there is a dropped frame.
3987  */
3988 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3989 lpfc_printf_vlog(vport, KERN_WARNING,
3990 LOG_FCP | LOG_FCP_ERROR,
3991 "9026 FCP Read Check Error "
3992 "and Underrun Data: x%x x%x x%x x%x\n",
3993 fcpDl,
3994 scsi_get_resid(cmnd), fcpi_parm,
3995 cmnd->cmnd[0]);
3996 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3997 host_status = DID_ERROR;
3998 }
3999
4000 /* The cmnd->underflow is the minimum number of bytes that must
4001  * be transferred for this command.  Provided a sense condition
4002  * is not present, make sure the actual amount transferred is at
4003  * least the underflow value or fail.
4004  */
4005 if (!(resp_info & SNS_LEN_VALID) &&
4006 (scsi_status == SAM_STAT_GOOD) &&
4007 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
4008 < cmnd->underflow)) {
4009 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4010 "9027 FCP command x%x residual "
4011 "underrun converted to error "
4012 "Data: x%x x%x x%x\n",
4013 cmnd->cmnd[0], scsi_bufflen(cmnd),
4014 scsi_get_resid(cmnd), cmnd->underflow);
4015 host_status = DID_ERROR;
4016 }
4017 } else if (resp_info & RESID_OVER) {
4018 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4019 "9028 FCP command x%x residual overrun error. "
4020 "Data: x%x x%x\n", cmnd->cmnd[0],
4021 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
4022 host_status = DID_ERROR;
4023
4024 /*
4025  * Check SLI validation that all the transfer was actually done
4026  * (fcpi_parm should be zero). Apply check only to reads.
4027  */
4028 } else if (fcpi_parm) {
4029 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
4030 "9029 FCP %s Check Error Data: "
4031 "x%x x%x x%x x%x x%x\n",
4032 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
4033 "Read" : "Write"),
4034 fcpDl, be32_to_cpu(fcprsp->rspResId),
4035 fcpi_parm, cmnd->cmnd[0], scsi_status);
4036
4037 /* If the transfer count reported by the HBA exceeds the
4038  * expected transfer length, the counters cannot be trusted;
4039  * skip the residual handling below.
4040  */
4041 if (fcpi_parm > fcpDl)
4042 goto out;
4043
4044 switch (scsi_status) {
4045 case SAM_STAT_GOOD:
4046 case SAM_STAT_CHECK_CONDITION:
4047 /* Fabric dropped a data frame. Fail any successful
4048  * command in which it detected dropped frames.
4049  * A status of good or some check conditions could
4050  * be considered a successful command.
4051  */
4052 host_status = DID_ERROR;
4053 break;
4054 }
4055 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4056 }
4057
4058 out:
4059 cmnd->result = host_status << 16 | scsi_status;
4060 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4061 }
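
/*
 * Worked example (illustrative only, added commentary): suppose
 * fcpDl = 4096 and the target reports RESID_UNDER with rspResId = 512;
 * the midlayer residual is set to 512.  If the HBA's own count of
 * untransferred bytes (fcpi_parm) disagrees with that residual, a frame
 * was likely dropped, so the residual is forced to the full buffer
 * length and the command fails with DID_ERROR rather than risk silent
 * data corruption.
 */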
4062
4063 /**
4064  * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4065  * @phba: The hba for which this call is being executed.
4066  * @pwqeIn: The command WQE for the scsi cmnd.
4067  * @pwqeOut: Pointer to driver response WQE object.
4068  *
4069  * This routine assigns scsi command result by looking into response WQE
4070  * status field appropriately. This routine handles QUEUE FULL condition as
4071  * well by ramping down device queue depth.
4072  **/
4073 static void
4074 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4075 struct lpfc_iocbq *pwqeOut)
4076 {
4077 struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
4078 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
4079 struct lpfc_vport *vport = pwqeIn->vport;
4080 struct lpfc_rport_data *rdata;
4081 struct lpfc_nodelist *ndlp;
4082 struct scsi_cmnd *cmd;
4083 unsigned long flags;
4084 struct lpfc_fast_path_event *fast_path_evt;
4085 struct Scsi_Host *shost;
4086 u32 logit = LOG_FCP;
4087 u32 status, idx;
4088 u32 lat;
4089 u8 wait_xb_clr = 0;
4090
4091 /* Sanity check on return of outstanding command */
4092 if (!lpfc_cmd) {
4093 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4094 "9032 Null lpfc_cmd pointer. No "
4095 "release, skip completion\n");
4096 return;
4097 }
4098
4099 rdata = lpfc_cmd->rdata;
4100 ndlp = rdata->pnode;
4101
4102 /* Sanity check on return of outstanding command */
4103 cmd = lpfc_cmd->pCmd;
4104 if (!cmd) {
4105 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4106 "9042 I/O completion: Not an active IO\n");
4107 lpfc_release_scsi_buf(phba, lpfc_cmd);
4108 return;
4109 }
4110
4111 spin_lock(&lpfc_cmd->buf_lock);
4112 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4113 if (phba->sli4_hba.hdwq)
4114 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4115
4116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4117 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4118 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4119 #endif
4120 shost = cmd->device->host;
4121
4122 status = bf_get(lpfc_wcqe_c_status, wcqe);
4123 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4124 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4125
4126 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4127 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4128 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4129 if (phba->cfg_fcp_wait_abts_rsp)
4130 wait_xb_clr = 1;
4131 }
4132
4133 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4134 if (lpfc_cmd->prot_data_type) {
4135 struct scsi_dif_tuple *src = NULL;
4136
4137 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4138
4139 /*
4140  * Used to restore any changes to protection data for error injection.
4141  */
4142 switch (lpfc_cmd->prot_data_type) {
4143 case LPFC_INJERR_REFTAG:
4144 src->ref_tag =
4145 lpfc_cmd->prot_data;
4146 break;
4147 case LPFC_INJERR_APPTAG:
4148 src->app_tag =
4149 (uint16_t)lpfc_cmd->prot_data;
4150 break;
4151 case LPFC_INJERR_GUARD:
4152 src->guard_tag =
4153 (uint16_t)lpfc_cmd->prot_data;
4154 break;
4155 default:
4156 break;
4157 }
4158
4159 lpfc_cmd->prot_data = 0;
4160 lpfc_cmd->prot_data_type = 0;
4161 lpfc_cmd->prot_data_segment = NULL;
4162 }
4163 #endif
4164 if (unlikely(lpfc_cmd->status)) {
4165 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4166 (lpfc_cmd->result & IOERR_DRVR_MASK))
4167 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4168 else if (lpfc_cmd->status >= IOSTAT_CNT)
4169 lpfc_cmd->status = IOSTAT_DEFAULT;
4170 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4171 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4172 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4173 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4174 logit = 0;
4175 else
4176 logit = LOG_FCP | LOG_FCP_UNDER;
4177 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4178 "9034 FCP cmd x%x failed <%d/%lld> "
4179 "status: x%x result: x%x "
4180 "sid: x%x did: x%x oxid: x%x "
4181 "Data: x%x x%x x%x\n",
4182 cmd->cmnd[0],
4183 cmd->device ? cmd->device->id : 0xffff,
4184 cmd->device ? cmd->device->lun : 0xffff,
4185 lpfc_cmd->status, lpfc_cmd->result,
4186 vport->fc_myDID,
4187 (ndlp) ? ndlp->nlp_DID : 0,
4188 lpfc_cmd->cur_iocbq.sli4_xritag,
4189 wcqe->parameter, wcqe->total_data_placed,
4190 lpfc_cmd->cur_iocbq.iotag);
4191 }
4192
4193 switch (lpfc_cmd->status) {
4194 case IOSTAT_SUCCESS:
4195 cmd->result = DID_OK << 16;
4196 break;
4197 case IOSTAT_FCP_RSP_ERROR:
4198 lpfc_handle_fcp_err(vport, lpfc_cmd,
4199 pwqeIn->wqe.fcp_iread.total_xfer_len -
4200 wcqe->total_data_placed);
4201 break;
4202 case IOSTAT_NPORT_BSY:
4203 case IOSTAT_FABRIC_BSY:
4204 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4205 fast_path_evt = lpfc_alloc_fast_evt(phba);
4206 if (!fast_path_evt)
4207 break;
4208 fast_path_evt->un.fabric_evt.event_type =
4209 FC_REG_FABRIC_EVENT;
4210 fast_path_evt->un.fabric_evt.subcategory =
4211 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4212 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4213 if (ndlp) {
4214 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4215 &ndlp->nlp_portname,
4216 sizeof(struct lpfc_name));
4217 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4218 &ndlp->nlp_nodename,
4219 sizeof(struct lpfc_name));
4220 }
4221 fast_path_evt->vport = vport;
4222 fast_path_evt->work_evt.evt =
4223 LPFC_EVT_FASTPATH_MGMT_EVT;
4224 spin_lock_irqsave(&phba->hbalock, flags);
4225 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4226 &phba->work_list);
4227 spin_unlock_irqrestore(&phba->hbalock, flags);
4228 lpfc_worker_wake_up(phba);
4229 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4230 "9035 Fabric/Node busy FCP cmd x%x failed"
4231 " <%d/%lld> "
4232 "status: x%x result: x%x "
4233 "sid: x%x did: x%x oxid: x%x "
4234 "Data: x%x x%x x%x\n",
4235 cmd->cmnd[0],
4236 cmd->device ? cmd->device->id : 0xffff,
4237 cmd->device ? cmd->device->lun : 0xffff,
4238 lpfc_cmd->status, lpfc_cmd->result,
4239 vport->fc_myDID,
4240 (ndlp) ? ndlp->nlp_DID : 0,
4241 lpfc_cmd->cur_iocbq.sli4_xritag,
4242 wcqe->parameter,
4243 wcqe->total_data_placed,
4244 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4245 break;
4246 case IOSTAT_REMOTE_STOP:
4247 if (ndlp) {
4248 /* This IO was aborted by the target, we don't
4249  * know the rxid and because we did not send the
4250  * ABTS we cannot generate an RRQ.
4251  */
4252 lpfc_set_rrq_active(phba, ndlp,
4253 lpfc_cmd->cur_iocbq.sli4_lxritag,
4254 0, 0);
4255 }
4256 fallthrough;
4257 case IOSTAT_LOCAL_REJECT:
4258 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4259 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4260 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4261 lpfc_cmd->result ==
4262 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4263 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4264 lpfc_cmd->result ==
4265 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4266 cmd->result = DID_NO_CONNECT << 16;
4267 break;
4268 }
4269 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4270 lpfc_cmd->result == IOERR_LINK_DOWN ||
4271 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4272 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4273 lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4274 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4275 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4276 break;
4277 }
4278 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4279 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4280 status == CQE_STATUS_DI_ERROR) {
4281 if (scsi_get_prot_op(cmd) !=
4282 SCSI_PROT_NORMAL) {
4283 /*
4284  * This is a response for a BG enabled
4285  * cmd. Parse BG error
4286  */
4287 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4288 break;
4289 } else {
4290 lpfc_printf_vlog(vport, KERN_WARNING,
4291 LOG_BG,
4292 "9040 non-zero BGSTAT "
4293 "on unprotected cmd\n");
4294 }
4295 }
4296 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4297 "9036 Local Reject FCP cmd x%x failed"
4298 " <%d/%lld> "
4299 "status: x%x result: x%x "
4300 "sid: x%x did: x%x oxid: x%x "
4301 "Data: x%x x%x x%x\n",
4302 cmd->cmnd[0],
4303 cmd->device ? cmd->device->id : 0xffff,
4304 cmd->device ? cmd->device->lun : 0xffff,
4305 lpfc_cmd->status, lpfc_cmd->result,
4306 vport->fc_myDID,
4307 (ndlp) ? ndlp->nlp_DID : 0,
4308 lpfc_cmd->cur_iocbq.sli4_xritag,
4309 wcqe->parameter,
4310 wcqe->total_data_placed,
4311 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4312 fallthrough;
4313 default:
4314 if (lpfc_cmd->status >= IOSTAT_CNT)
4315 lpfc_cmd->status = IOSTAT_DEFAULT;
4316 cmd->result = DID_ERROR << 16;
4317 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4318 "9037 FCP Completion Error: xri %x "
4319 "status x%x result x%x [x%x] "
4320 "placed x%x\n",
4321 lpfc_cmd->cur_iocbq.sli4_xritag,
4322 lpfc_cmd->status, lpfc_cmd->result,
4323 wcqe->parameter,
4324 wcqe->total_data_placed);
4325 }
4326 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4327 u32 *lp = (u32 *)cmd->sense_buffer;
4328
4329 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4330 "9039 Iodone <%d/%llu> cmd x%px, error "
4331 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4332 cmd->device->id, cmd->device->lun, cmd,
4333 cmd->result, *lp, *(lp + 3),
4334 (u64)scsi_get_lba(cmd),
4335 cmd->retries, scsi_get_resid(cmd));
4336 }
4337
4338 lpfc_update_stats(vport, lpfc_cmd);
4339
4340 if (vport->cfg_max_scsicmpl_time &&
4341 time_after(jiffies, lpfc_cmd->start_time +
4342 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4343 spin_lock_irqsave(shost->host_lock, flags);
4344 if (ndlp) {
4345 if (ndlp->cmd_qdepth >
4346 atomic_read(&ndlp->cmd_pending) &&
4347 (atomic_read(&ndlp->cmd_pending) >
4348 LPFC_MIN_TGT_QDEPTH) &&
4349 (cmd->cmnd[0] == READ_10 ||
4350 cmd->cmnd[0] == WRITE_10))
4351 ndlp->cmd_qdepth =
4352 atomic_read(&ndlp->cmd_pending);
4353
4354 ndlp->last_change_time = jiffies;
4355 }
4356 spin_unlock_irqrestore(shost->host_lock, flags);
4357 }
4358 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4359
4360 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4361 if (lpfc_cmd->ts_cmd_start) {
4362 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4363 lpfc_cmd->ts_data_io = ktime_get_ns();
4364 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4365 lpfc_io_ktime(phba, lpfc_cmd);
4366 }
4367 #endif
4368 if (likely(!wait_xb_clr))
4369 lpfc_cmd->pCmd = NULL;
4370 spin_unlock(&lpfc_cmd->buf_lock);
4371
4372 /* Check if IO qualified for CMF */
4373 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4374 cmd->sc_data_direction == DMA_FROM_DEVICE &&
4375 (scsi_sg_count(cmd))) {
4376 /* Used when calculating average latency */
4377 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4378 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4379 }
4380
4381 if (wait_xb_clr)
4382 goto out;
4383
4384 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4385 scsi_done(cmd);
4386
4387 /*
4388  * If there is an abort thread waiting for command completion
4389  * wake up the thread.
4390  */
4391 spin_lock(&lpfc_cmd->buf_lock);
4392 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4393 if (lpfc_cmd->waitq)
4394 wake_up(lpfc_cmd->waitq);
4395 spin_unlock(&lpfc_cmd->buf_lock);
4396 out:
4397 lpfc_release_scsi_buf(phba, lpfc_cmd);
4398 }
4399
4400 /**
4401  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4402  * @phba: The Hba for which this call is being executed.
4403  * @pIocbIn: The command IOCBQ for the scsi cmnd.
4404  * @pIocbOut: The response IOCBQ for the scsi cmnd.
4405  *
4406  * This routine assigns scsi command result by looking into response IOCB
4407  * status field appropriately. This routine handles QUEUE FULL condition as
4408  * well by ramping down device queue depth.
4409  **/
4410 static void
4411 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4412 struct lpfc_iocbq *pIocbOut)
4413 {
4414 struct lpfc_io_buf *lpfc_cmd =
4415 (struct lpfc_io_buf *) pIocbIn->io_buf;
4416 struct lpfc_vport *vport = pIocbIn->vport;
4417 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4418 struct lpfc_nodelist *pnode = rdata->pnode;
4419 struct scsi_cmnd *cmd;
4420 unsigned long flags;
4421 struct lpfc_fast_path_event *fast_path_evt;
4422 struct Scsi_Host *shost;
4423 int idx;
4424 uint32_t logit = LOG_FCP;
4425
4426 /* Guard against abort handler being called at same time */
4427 spin_lock(&lpfc_cmd->buf_lock);
4428
4429 /* Sanity check on return of outstanding command */
4430 cmd = lpfc_cmd->pCmd;
4431 if (!cmd || !phba) {
4432 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4433 "2621 IO completion: Not an active IO\n");
4434 spin_unlock(&lpfc_cmd->buf_lock);
4435 return;
4436 }
4437
4438 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4439 if (phba->sli4_hba.hdwq)
4440 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4441
4442 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4443 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4444 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4445 #endif
4446 shost = cmd->device->host;
4447
4448 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4449 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4450
4451 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4452 if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
4453 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4454
4455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4456 if (lpfc_cmd->prot_data_type) {
4457 struct scsi_dif_tuple *src = NULL;
4458
4459 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4460
4461 /*
4462  * Used to restore any changes to protection data for error injection.
4463  */
4464 switch (lpfc_cmd->prot_data_type) {
4465 case LPFC_INJERR_REFTAG:
4466 src->ref_tag =
4467 lpfc_cmd->prot_data;
4468 break;
4469 case LPFC_INJERR_APPTAG:
4470 src->app_tag =
4471 (uint16_t)lpfc_cmd->prot_data;
4472 break;
4473 case LPFC_INJERR_GUARD:
4474 src->guard_tag =
4475 (uint16_t)lpfc_cmd->prot_data;
4476 break;
4477 default:
4478 break;
4479 }
4480
4481 lpfc_cmd->prot_data = 0;
4482 lpfc_cmd->prot_data_type = 0;
4483 lpfc_cmd->prot_data_segment = NULL;
4484 }
4485 #endif
4486
4487 if (unlikely(lpfc_cmd->status)) {
4488 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4489 (lpfc_cmd->result & IOERR_DRVR_MASK))
4490 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4491 else if (lpfc_cmd->status >= IOSTAT_CNT)
4492 lpfc_cmd->status = IOSTAT_DEFAULT;
4493 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4494 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4495 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4496 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4497 logit = 0;
4498 else
4499 logit = LOG_FCP | LOG_FCP_UNDER;
4500 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4501 "9030 FCP cmd x%x failed <%d/%lld> "
4502 "status: x%x result: x%x "
4503 "sid: x%x did: x%x oxid: x%x "
4504 "Data: x%x x%x\n",
4505 cmd->cmnd[0],
4506 cmd->device ? cmd->device->id : 0xffff,
4507 cmd->device ? cmd->device->lun : 0xffff,
4508 lpfc_cmd->status, lpfc_cmd->result,
4509 vport->fc_myDID,
4510 (pnode) ? pnode->nlp_DID : 0,
4511 phba->sli_rev == LPFC_SLI_REV4 ?
4512 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4513 pIocbOut->iocb.ulpContext,
4514 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4515
4516 switch (lpfc_cmd->status) {
4517 case IOSTAT_FCP_RSP_ERROR:
4518 /* Call FCP RSP handler to determine result */
4519 lpfc_handle_fcp_err(vport, lpfc_cmd,
4520 pIocbOut->iocb.un.fcpi.fcpi_parm);
4521 break;
4522 case IOSTAT_NPORT_BSY:
4523 case IOSTAT_FABRIC_BSY:
4524 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4525 fast_path_evt = lpfc_alloc_fast_evt(phba);
4526 if (!fast_path_evt)
4527 break;
4528 fast_path_evt->un.fabric_evt.event_type =
4529 FC_REG_FABRIC_EVENT;
4530 fast_path_evt->un.fabric_evt.subcategory =
4531 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4532 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4533 if (pnode) {
4534 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4535 &pnode->nlp_portname,
4536 sizeof(struct lpfc_name));
4537 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4538 &pnode->nlp_nodename,
4539 sizeof(struct lpfc_name));
4540 }
4541 fast_path_evt->vport = vport;
4542 fast_path_evt->work_evt.evt =
4543 LPFC_EVT_FASTPATH_MGMT_EVT;
4544 spin_lock_irqsave(&phba->hbalock, flags);
4545 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4546 &phba->work_list);
4547 spin_unlock_irqrestore(&phba->hbalock, flags);
4548 lpfc_worker_wake_up(phba);
4549 break;
4550 case IOSTAT_LOCAL_REJECT:
4551 case IOSTAT_REMOTE_STOP:
4552 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4553 lpfc_cmd->result ==
4554 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4555 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4556 lpfc_cmd->result ==
4557 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4558 cmd->result = DID_NO_CONNECT << 16;
4559 break;
4560 }
4561 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4562 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4563 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4564 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4565 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4566 break;
4567 }
4568 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4569 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4570 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4571 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4572 /*
4573  * This is a response for a BG enabled
4574  * cmd. Parse BG error
4575  */
4576 lpfc_parse_bg_err(phba, lpfc_cmd,
4577 pIocbOut);
4578 break;
4579 } else {
4580 lpfc_printf_vlog(vport, KERN_WARNING,
4581 LOG_BG,
4582 "9031 non-zero BGSTAT "
4583 "on unprotected cmd\n");
4584 }
4585 }
4586 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4587 && (phba->sli_rev == LPFC_SLI_REV4)
4588 && pnode) {
4589 /* This IO was aborted by the target, we don't
4590  * know the rxid and because we did not send the
4591  * ABTS we cannot generate an RRQ.
4592  */
4593 lpfc_set_rrq_active(phba, pnode,
4594 lpfc_cmd->cur_iocbq.sli4_lxritag,
4595 0, 0);
4596 }
4597 fallthrough;
4598 default:
4599 cmd->result = DID_ERROR << 16;
4600 break;
4601 }
4602
4603 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4604 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4605 SAM_STAT_BUSY;
4606 } else
4607 cmd->result = DID_OK << 16;
4608
4609 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4610 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4611
4612 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4613 "0710 Iodone <%d/%llu> cmd x%px, error "
4614 "x%x SNS x%x x%x Data: x%x x%x\n",
4615 cmd->device->id, cmd->device->lun, cmd,
4616 cmd->result, *lp, *(lp + 3), cmd->retries,
4617 scsi_get_resid(cmd));
4618 }
4619
4620 lpfc_update_stats(vport, lpfc_cmd);
4621 if (vport->cfg_max_scsicmpl_time &&
4622 time_after(jiffies, lpfc_cmd->start_time +
4623 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4624 spin_lock_irqsave(shost->host_lock, flags);
4625 if (pnode) {
4626 if (pnode->cmd_qdepth >
4627 atomic_read(&pnode->cmd_pending) &&
4628 (atomic_read(&pnode->cmd_pending) >
4629 LPFC_MIN_TGT_QDEPTH) &&
4630 ((cmd->cmnd[0] == READ_10) ||
4631 (cmd->cmnd[0] == WRITE_10)))
4632 pnode->cmd_qdepth =
4633 atomic_read(&pnode->cmd_pending);
4634
4635 pnode->last_change_time = jiffies;
4636 }
4637 spin_unlock_irqrestore(shost->host_lock, flags);
4638 }
4639 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4640
4641 lpfc_cmd->pCmd = NULL;
4642 spin_unlock(&lpfc_cmd->buf_lock);
4643
4644 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4645 if (lpfc_cmd->ts_cmd_start) {
4646 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4647 lpfc_cmd->ts_data_io = ktime_get_ns();
4648 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4649 lpfc_io_ktime(phba, lpfc_cmd);
4650 }
4651 #endif
4652
4653 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4654 scsi_done(cmd);
4655
4656 /*
4657  * If there is an abort thread waiting for command completion
4658  * wake up the thread.
4659  */
4660 spin_lock(&lpfc_cmd->buf_lock);
4661 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4662 if (lpfc_cmd->waitq)
4663 wake_up(lpfc_cmd->waitq);
4664 spin_unlock(&lpfc_cmd->buf_lock);
4665
4666 lpfc_release_scsi_buf(phba, lpfc_cmd);
4667 }
4668
4669 /**
4670  * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4671  * @vport: Pointer to vport object.
4672  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4673  * @tmo: timeout value for the IO
4674  *
4675  * Based on the data-direction of the command, initialize IOCB
4676  * in the I/O buffer. Fill in the IOCB fields which are independent
4677  * of the scsi buffer.
4678  *
4679  * RETURNS 0 - SUCCESS,
4680  **/
4681 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4682 struct lpfc_io_buf *lpfc_cmd,
4683 uint8_t tmo)
4684 {
4685 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4686 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4687 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4688 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4689 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4690 int datadir = scsi_cmnd->sc_data_direction;
4691 u32 fcpdl;
4692
4693 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4694
4695 /*
4696  * There are three possibilities here - use scatter-gather segment, use
4697  * the single mapping, or neither.  Start the lpfc command prep by
4698  * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4699  * data bde entry.
4700  */
4701 if (scsi_sg_count(scsi_cmnd)) {
4702 if (datadir == DMA_TO_DEVICE) {
4703 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4704 iocb_cmd->ulpPU = PARM_READ_CHECK;
4705 if (vport->cfg_first_burst_size &&
4706 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4707 u32 xrdy_len;
4708
4709 fcpdl = scsi_bufflen(scsi_cmnd);
4710 xrdy_len = min(fcpdl,
4711 vport->cfg_first_burst_size);
4712 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4713 }
4714 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4715 } else {
4716 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4717 iocb_cmd->ulpPU = PARM_READ_CHECK;
4718 fcp_cmnd->fcpCntl3 = READ_DATA;
4719 }
4720 } else {
4721 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4722 iocb_cmd->un.fcpi.fcpi_parm = 0;
4723 iocb_cmd->ulpPU = 0;
4724 fcp_cmnd->fcpCntl3 = 0;
4725 }
4726
4727 /*
4728  * Finish initializing those IOCB fields that are independent
4729  * of the scsi_cmnd request_buffer
4730  */
4731 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4732 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4733 piocbq->iocb.ulpFCP2Rcvy = 1;
4734 else
4735 piocbq->iocb.ulpFCP2Rcvy = 0;
4736
4737 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4738 piocbq->io_buf = lpfc_cmd;
4739 if (!piocbq->cmd_cmpl)
4740 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4741 piocbq->iocb.ulpTimeout = tmo;
4742 piocbq->vport = vport;
4743 return 0;
4744 }
4745
4746 /**
4747  * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4748  * @vport: Pointer to vport object.
4749  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4750  * @tmo: timeout value for the IO
4751  *
4752  * Based on the data-direction of the command copy WQE template
4753  * to I/O buffer WQE. Fill in the WQE fields which are independent
4754  * of the scsi buffer.
4755  *
4756  * RETURNS 0 - SUCCESS,
4757  **/
4758 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4759 struct lpfc_io_buf *lpfc_cmd,
4760 uint8_t tmo)
4761 {
4762 struct lpfc_hba *phba = vport->phba;
4763 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4764 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4765 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4766 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4767 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4768 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4769 u16 idx = lpfc_cmd->hdwq_no;
4770 int datadir = scsi_cmnd->sc_data_direction;
4771
4772 hdwq = &phba->sli4_hba.hdwq[idx];
4773
4774 /* Initialize 64 bytes only */
4775 memset(wqe, 0, sizeof(union lpfc_wqe128));
4776
4777 /*
4778  * There are three possibilities here - use scatter-gather segment, use
4779  * the single mapping, or neither.
4780  */
4781 if (scsi_sg_count(scsi_cmnd)) {
4782 if (datadir == DMA_TO_DEVICE) {
4783 /* From the iwrite template, initialize words 7 - 11 */
4784 memcpy(&wqe->words[7],
4785 &lpfc_iwrite_cmd_template.words[7],
4786 sizeof(uint32_t) * 5);
4787
4788 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4789 if (hdwq)
4790 hdwq->scsi_cstat.output_requests++;
4791 } else {
4792 /* From the iread template, initialize words 7 - 11 */
4793 memcpy(&wqe->words[7],
4794 &lpfc_iread_cmd_template.words[7],
4795 sizeof(uint32_t) * 5);
4796
4797
4798 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4799
4800 fcp_cmnd->fcpCntl3 = READ_DATA;
4801 if (hdwq)
4802 hdwq->scsi_cstat.input_requests++;
4803
4804
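/* For a CMF Managed port, iod must be zero'ed */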
4805 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4806 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4807 LPFC_WQE_IOD_NONE);
4808 }
4809 } else {
4810
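/* From the icmnd template, initialize words 4 - 11 */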
4811 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4812 sizeof(uint32_t) * 8);
4813
4814
4815 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4816
4817 fcp_cmnd->fcpCntl3 = 0;
4818 if (hdwq)
4819 hdwq->scsi_cstat.control_requests++;
4820 }
4821
4822
4823
4824
4825
4826
4827
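/*
 * Finish initializing those WQE fields that are independent
 * of the request_buffer.
 */

/* Word 3 */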
4828 bf_set(payload_offset_len, &wqe->fcp_icmd,
4829 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4830
4831
4832 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4833 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4834 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4835
4836
4837 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4838 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4839
4840 bf_set(wqe_class, &wqe->generic.wqe_com,
4841 (pnode->nlp_fcp_info & 0x0f));
4842
4843
4844 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4845
4846
4847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4848
4849 pwqeq->vport = vport;
4850 pwqeq->io_buf = lpfc_cmd;
4851 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4852 pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4853
4854 return 0;
4855 }
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
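/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi buffer carrying the SCSI command to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command and invokes the sli_rev-specific prep routine.
 */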
4866 static int
4867 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4868 struct lpfc_nodelist *pnode)
4869 {
4870 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4871 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4872 u8 *ptr;
4873
4874 if (!pnode)
4875 return 0;
4876
4877 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4878
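/* clear task management bits */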
4879 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4880
4881 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4882 &lpfc_cmd->fcp_cmnd->fcp_lun);
4883
4884 ptr = &fcp_cmnd->fcpCdb[0];
4885 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4886 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4887 ptr += scsi_cmnd->cmd_len;
4888 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4889 }
4890
4891 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4892
4893 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4894
4895 return 0;
4896 }
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
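/**
 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI-3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates the FCP information unit corresponding to
 * @task_mgmt_cmd for a device with the SLI-3 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 */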
4912 static int
4913 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
4914 struct lpfc_io_buf *lpfc_cmd,
4915 u64 lun, u8 task_mgmt_cmd)
4916 {
4917 struct lpfc_iocbq *piocbq;
4918 IOCB_t *piocb;
4919 struct fcp_cmnd *fcp_cmnd;
4920 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4921 struct lpfc_nodelist *ndlp = rdata->pnode;
4922
4923 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4924 return 0;
4925
4926 piocbq = &(lpfc_cmd->cur_iocbq);
4927 piocbq->vport = vport;
4928
4929 piocb = &piocbq->iocb;
4930
4931 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4932
4933 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4934 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4935 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4936 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4937 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4938 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4939 piocb->ulpContext = ndlp->nlp_rpi;
4940 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4941 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4942 piocb->ulpPU = 0;
4943 piocb->un.fcpi.fcpi_parm = 0;
4944
4945
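/* ulpTimeout is only one byte */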
4946 if (lpfc_cmd->timeout > 0xff) {
4947
4948
4949
4950
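/*
 * Do not timeout the command at the firmware level.
 * The driver will provide the timeout mechanism.
 */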
4951 piocb->ulpTimeout = 0;
4952 } else
4953 piocb->ulpTimeout = lpfc_cmd->timeout;
4954
4955 return 1;
4956 }
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
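/**
 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI-4 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates the FCP information unit corresponding to
 * @task_mgmt_cmd for a device with the SLI-4 interface spec.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 */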
4972 static int
4973 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
4974 struct lpfc_io_buf *lpfc_cmd,
4975 u64 lun, u8 task_mgmt_cmd)
4976 {
4977 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4978 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4979 struct fcp_cmnd *fcp_cmnd;
4980 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4981 struct lpfc_nodelist *ndlp = rdata->pnode;
4982
4983 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4984 return 0;
4985
4986 pwqeq->vport = vport;
4987
4988 memset(wqe, 0, sizeof(union lpfc_wqe128));
4989
4990
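/* From the icmnd template, initialize words 4 - 11 */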
4991 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4992 sizeof(uint32_t) * 8);
4993
4994 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4995
4996 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4997 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4998 fcp_cmnd->fcpCntl3 = 0;
4999 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
5000
5001 bf_set(payload_offset_len, &wqe->fcp_icmd,
5002 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
5003 bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
5004 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
5005 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
5006 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
5007 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
5008 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
5009 (ndlp->nlp_fcp_info & 0x0f));
5010
5011
5012 if (lpfc_cmd->timeout > 0xff) {
5013
5014
5015
5016
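/*
 * Do not timeout the command at the firmware level.
 * The driver will provide the timeout mechanism.
 */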
5017 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
5018 } else {
5019 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
5020 }
5021
5022 lpfc_prep_embed_io(vport->phba, lpfc_cmd);
5023 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
5024 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
5025 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
5026
5027 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
5028
5029 return 1;
5030 }
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
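/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 */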
5041 int
5042 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5043 {
5044
5045 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
5046
5047 switch (dev_grp) {
5048 case LPFC_PCI_DEV_LP:
5049 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
5050 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
5051 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
5052 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
5053 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
5054 phba->lpfc_scsi_prep_task_mgmt_cmd =
5055 lpfc_scsi_prep_task_mgmt_cmd_s3;
5056 break;
5057 case LPFC_PCI_DEV_OC:
5058 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5059 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5060 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5061 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5062 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5063 phba->lpfc_scsi_prep_task_mgmt_cmd =
5064 lpfc_scsi_prep_task_mgmt_cmd_s4;
5065 break;
5066 default:
5067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5068 "1418 Invalid HBA PCI-device group: 0x%x\n",
5069 dev_grp);
5070 return -ENODEV;
5071 }
5072 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5073 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5074 return 0;
5075 }
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
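/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with the command.
 */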
5086 static void
5087 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5088 struct lpfc_iocbq *cmdiocbq,
5089 struct lpfc_iocbq *rspiocbq)
5090 {
5091 struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
5092 if (lpfc_cmd)
5093 lpfc_release_scsi_buf(phba, lpfc_cmd);
5094 return;
5095 }
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
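/**
 * lpfc_check_pci_resettable - Walks the devices on the pci_dev's bus to check
 *                             if issuing a pci bus reset is possibly unsafe
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Walks the bus_list to ensure only PCI devices with an Emulex vendor id,
 * SLI-4 FC-mode device ids, and only one occurrence of function 0 are
 * present.
 *
 * Returns:
 *   -EBADSLT - detected invalid device
 *   0        - successful
 */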
5111 int
5112 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5113 {
5114 const struct pci_dev *pdev = phba->pcidev;
5115 struct pci_dev *ptr = NULL;
5116 u8 counter = 0;
5117
5118
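/* Walk the list of devices on the pci_dev's bus */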
5119 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5120
5121 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5123 "8346 Non-Emulex vendor found: "
5124 "0x%04x\n", ptr->vendor);
5125 return -EBADSLT;
5126 }
5127
5128
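/* Only SLI-4 FC (non-FCoE) adapters are considered reset capable */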
5129 if (phba->sli_rev != LPFC_SLI_REV4 ||
5130 phba->hba_flag & HBA_FCOE_MODE) {
5131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5132 "8347 Incapable PCI reset device: "
5133 "0x%04x\n", ptr->device);
5134 return -EBADSLT;
5135 }
5136
5137
5138
5139
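/*
 * Check for only one function 0 ID to ensure only one HBA is
 * on the secondary bus.
 */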
5140 if (ptr->devfn == 0) {
5141 if (++counter > 1) {
5142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5143 "8348 More than one device on "
5144 "secondary bus found\n");
5145 return -EBADSLT;
5146 }
5147 }
5148 }
5149
5150 return 0;
5151 }
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
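/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return code:
 *   Pointer to char - Success.
 */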
5162 const char *
5163 lpfc_info(struct Scsi_Host *host)
5164 {
5165 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5166 struct lpfc_hba *phba = vport->phba;
5167 int link_speed = 0;
5168 static char lpfcinfobuf[384];
5169 char tmp[384] = {0};
5170
5171 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5172 if (phba && phba->pcidev){
5173
5174 scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
5175 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5176 sizeof(lpfcinfobuf))
5177 goto buffer_done;
5178
5179
5180 scnprintf(tmp, sizeof(tmp),
5181 " on PCI bus %02x device %02x irq %d",
5182 phba->pcidev->bus->number, phba->pcidev->devfn,
5183 phba->pcidev->irq);
5184 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5185 sizeof(lpfcinfobuf))
5186 goto buffer_done;
5187
5188
5189 if (phba->Port[0]) {
5190 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5191 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5192 sizeof(lpfcinfobuf))
5193 goto buffer_done;
5194 }
5195
5196
5197 link_speed = lpfc_sli_port_speed_get(phba);
5198 if (link_speed != 0) {
5199 scnprintf(tmp, sizeof(tmp),
5200 " Logical Link Speed: %d Mbps", link_speed);
5201 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5202 sizeof(lpfcinfobuf))
5203 goto buffer_done;
5204 }
5205
5206
5207 if (!lpfc_check_pci_resettable(phba)) {
5208 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5209 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5210 }
5211 }
5212
5213 buffer_done:
5214 return lpfcinfobuf;
5215 }
5216
5217
5218
5219
5220
5221
5222
5223
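/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine rearms the fcp_poll_timer of @phba by cfg_poll_tmo, but
 * only while the FCP ring txcmplq is non-empty.
 */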
5224 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5225 {
5226 unsigned long poll_tmo_expires =
5227 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5228
5229 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5230 mod_timer(&phba->fcp_poll_timer,
5231 poll_tmo_expires);
5232 }
5233
5234
5235
5236
5237
5238
5239
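/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 */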
5240 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5241 {
5242 lpfc_poll_rearm_timer(phba);
5243 }
5244
5245
5246
5247
5248
5249
5250
5251
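/**
 * lpfc_poll_timeout - Restart polling timer
 * @t: Timer construct from which the lpfc_hba data structure is obtained.
 *
 * This routine services and restarts the fcp_poll timer when FCP ring
 * polling is enabled and the FCP ring interrupt is disabled.
 */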
5252 void lpfc_poll_timeout(struct timer_list *t)
5253 {
5254 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5255
5256 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5257 lpfc_sli_handle_fast_ring_event(phba,
5258 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5259
5260 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5261 lpfc_poll_rearm_timer(phba);
5262 }
5263 }
5264
5265
5266
5267
5268
5269
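/*
 * lpfc_is_command_vm_io - get the UUID from blk cgroup
 * @cmd: Pointer to scsi_cmnd data structure
 * Returns UUID if present, otherwise NULL
 */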
5270 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5271 {
5272 struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5273
5274 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
5275 return NULL;
5276 return blkcg_get_fc_appid(bio);
5277 }
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
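/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: kernel scsi host pointer.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine with the scsi midlayer to submit a @cmnd
 * to process. This routine prepares an IOCB/WQE from the scsi command and
 * provides it to the firmware.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 *   SCSI_MLQUEUE_TARGET_BUSY - Block devices served by this target temporarily.
 */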
5292 static int
5293 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5294 {
5295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5296 struct lpfc_hba *phba = vport->phba;
5297 struct lpfc_iocbq *cur_iocbq = NULL;
5298 struct lpfc_rport_data *rdata;
5299 struct lpfc_nodelist *ndlp;
5300 struct lpfc_io_buf *lpfc_cmd;
5301 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5302 int err, idx;
5303 u8 *uuid = NULL;
5304 uint64_t start;
5305
5306 start = ktime_get_ns();
5307 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5308
5309
5310 if (unlikely(!rdata) || unlikely(!rport))
5311 goto out_fail_command;
5312
5313 err = fc_remote_port_chkready(rport);
5314 if (err) {
5315 cmnd->result = err;
5316 goto out_fail_command;
5317 }
5318 ndlp = rdata->pnode;
5319
5320 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5321 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5322
5323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5324 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5325 " op:%02x str=%s without registering for"
5326 " BlockGuard - Rejecting command\n",
5327 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5328 dif_op_str[scsi_get_prot_op(cmnd)]);
5329 goto out_fail_command;
5330 }
5331
5332
5333
5334
5335
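/*
 * Catch race where our node has transitioned, but the
 * transport is still transitioning.
 */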
5336 if (!ndlp)
5337 goto out_tgt_busy1;
5338
5339
5340 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5341 cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5342 (scsi_sg_count(cmnd))) {
5343
5344 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5345 if (err)
5346 goto out_tgt_busy1;
5347 }
5348
5349 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5350 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5351 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5352 "3377 Target Queue Full, scsi Id:%d "
5353 "Qdepth:%d Pending command:%d"
5354 " WWNN:%02x:%02x:%02x:%02x:"
5355 "%02x:%02x:%02x:%02x, "
5356 " WWPN:%02x:%02x:%02x:%02x:"
5357 "%02x:%02x:%02x:%02x",
5358 ndlp->nlp_sid, ndlp->cmd_qdepth,
5359 atomic_read(&ndlp->cmd_pending),
5360 ndlp->nlp_nodename.u.wwn[0],
5361 ndlp->nlp_nodename.u.wwn[1],
5362 ndlp->nlp_nodename.u.wwn[2],
5363 ndlp->nlp_nodename.u.wwn[3],
5364 ndlp->nlp_nodename.u.wwn[4],
5365 ndlp->nlp_nodename.u.wwn[5],
5366 ndlp->nlp_nodename.u.wwn[6],
5367 ndlp->nlp_nodename.u.wwn[7],
5368 ndlp->nlp_portname.u.wwn[0],
5369 ndlp->nlp_portname.u.wwn[1],
5370 ndlp->nlp_portname.u.wwn[2],
5371 ndlp->nlp_portname.u.wwn[3],
5372 ndlp->nlp_portname.u.wwn[4],
5373 ndlp->nlp_portname.u.wwn[5],
5374 ndlp->nlp_portname.u.wwn[6],
5375 ndlp->nlp_portname.u.wwn[7]);
5376 goto out_tgt_busy2;
5377 }
5378 }
5379
5380 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5381 if (lpfc_cmd == NULL) {
5382 lpfc_rampdown_queue_depth(phba);
5383
5384 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5385 "0707 driver's buffer pool is empty, "
5386 "IO busied\n");
5387 goto out_host_busy;
5388 }
5389 lpfc_cmd->rx_cmd_start = start;
5390
5391 cur_iocbq = &lpfc_cmd->cur_iocbq;
5392
5393
5394
5395
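/*
 * Store the midlayer's command structure for the completion phase
 * and complete the command initialization.
 */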
5396 lpfc_cmd->pCmd = cmnd;
5397 lpfc_cmd->rdata = rdata;
5398 lpfc_cmd->ndlp = ndlp;
5399 cur_iocbq->cmd_cmpl = NULL;
5400 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5401
5402 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5403 if (err)
5404 goto out_host_busy_release_buf;
5405
5406 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5407 if (vport->phba->cfg_enable_bg) {
5408 lpfc_printf_vlog(vport,
5409 KERN_INFO, LOG_SCSI_CMD,
5410 "9033 BLKGRD: rcvd %s cmd:x%x "
5411 "reftag x%x cnt %u pt %x\n",
5412 dif_op_str[scsi_get_prot_op(cmnd)],
5413 cmnd->cmnd[0],
5414 scsi_prot_ref_tag(cmnd),
5415 scsi_logical_block_count(cmnd),
5416 (cmnd->cmnd[1]>>5));
5417 }
5418 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5419 } else {
5420 if (vport->phba->cfg_enable_bg) {
5421 lpfc_printf_vlog(vport,
5422 KERN_INFO, LOG_SCSI_CMD,
5423 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5424 "x%x reftag x%x cnt %u pt %x\n",
5425 cmnd->cmnd[0],
5426 scsi_prot_ref_tag(cmnd),
5427 scsi_logical_block_count(cmnd),
5428 (cmnd->cmnd[1]>>5));
5429 }
5430 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5431 }
5432
5433 if (unlikely(err)) {
5434 if (err == 2) {
5435 cmnd->result = DID_ERROR << 16;
5436 goto out_fail_command_release_buf;
5437 }
5438 goto out_host_busy_free_buf;
5439 }
5440
5441
5442 if (lpfc_is_vmid_enabled(phba) &&
5443 (ndlp->vmid_support ||
5444 phba->pport->vmid_priority_tagging ==
5445 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5446
5447
5448 uuid = lpfc_is_command_vm_io(cmnd);
5449
5450 if (uuid) {
5451 err = lpfc_vmid_get_appid(vport, uuid,
5452 cmnd->sc_data_direction,
5453 (union lpfc_vmid_io_tag *)
5454 &cur_iocbq->vmid_tag);
5455 if (!err)
5456 cur_iocbq->cmd_flag |= LPFC_IO_VMID;
5457 }
5458 }
5459
5460 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5461 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5462 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5463 #endif
5464
5465 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5466 SLI_IOCB_RET_IOCB);
5467 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5468 if (start) {
5469 lpfc_cmd->ts_cmd_start = start;
5470 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5471 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5472 } else {
5473 lpfc_cmd->ts_cmd_start = 0;
5474 }
5475 #endif
5476 if (err) {
5477 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5478 "3376 FCP could not issue iocb err %x "
5479 "FCP cmd x%x <%d/%llu> "
5480 "sid: x%x did: x%x oxid: x%x "
5481 "Data: x%x x%x x%x x%x\n",
5482 err, cmnd->cmnd[0],
5483 cmnd->device ? cmnd->device->id : 0xffff,
5484 cmnd->device ? cmnd->device->lun : (u64)-1,
5485 vport->fc_myDID, ndlp->nlp_DID,
5486 phba->sli_rev == LPFC_SLI_REV4 ?
5487 cur_iocbq->sli4_xritag : 0xffff,
5488 phba->sli_rev == LPFC_SLI_REV4 ?
5489 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5490 cur_iocbq->iocb.ulpContext,
5491 cur_iocbq->iotag,
5492 phba->sli_rev == LPFC_SLI_REV4 ?
5493 bf_get(wqe_tmo,
5494 &cur_iocbq->wqe.generic.wqe_com) :
5495 cur_iocbq->iocb.ulpTimeout,
5496 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5497
5498 goto out_host_busy_free_buf;
5499 }
5500
5501 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5502 lpfc_sli_handle_fast_ring_event(phba,
5503 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5504
5505 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5506 lpfc_poll_rearm_timer(phba);
5507 }
5508
5509 if (phba->cfg_xri_rebalancing)
5510 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5511
5512 return 0;
5513
5514 out_host_busy_free_buf:
5515 idx = lpfc_cmd->hdwq_no;
5516 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5517 if (phba->sli4_hba.hdwq) {
5518 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5519 case WRITE_DATA:
5520 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5521 break;
5522 case READ_DATA:
5523 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5524 break;
5525 default:
5526 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5527 }
5528 }
5529 out_host_busy_release_buf:
5530 lpfc_release_scsi_buf(phba, lpfc_cmd);
5531 out_host_busy:
5532 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5533 shost);
5534 return SCSI_MLQUEUE_HOST_BUSY;
5535
5536 out_tgt_busy2:
5537 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5538 shost);
5539 out_tgt_busy1:
5540 return SCSI_MLQUEUE_TARGET_BUSY;
5541
5542 out_fail_command_release_buf:
5543 lpfc_release_scsi_buf(phba, lpfc_cmd);
5544 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5545 shost);
5546
5547 out_fail_command:
5548 scsi_done(cmnd);
5549 return 0;
5550 }
5551
5552
5553
5554
5555
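/*
 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
 * @vport: The virtual port for which this call is being executed.
 */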
5556 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5557 {
5558 u32 bucket;
5559 struct lpfc_vmid *cur;
5560
5561 if (vport->port_type == LPFC_PHYSICAL_PORT)
5562 del_timer_sync(&vport->phba->inactive_vmid_poll);
5563
5564 kfree(vport->qfpa_res);
5565 kfree(vport->vmid_priority.vmid_range);
5566 kfree(vport->vmid);
5567
5568 if (!hash_empty(vport->hash_table))
5569 hash_for_each(vport->hash_table, bucket, cur, hnode)
5570 hash_del(&cur->hnode);
5571
5572 vport->qfpa_res = NULL;
5573 vport->vmid_priority.vmid_range = NULL;
5574 vport->vmid = NULL;
5575 vport->cur_vmid_cnt = 0;
5576 }
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
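/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in the base driver.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */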
5588 static int
5589 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5590 {
5591 struct Scsi_Host *shost = cmnd->device->host;
5592 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5593 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5594 struct lpfc_hba *phba = vport->phba;
5595 struct lpfc_iocbq *iocb;
5596 struct lpfc_io_buf *lpfc_cmd;
5597 int ret = SUCCESS, status = 0;
5598 struct lpfc_sli_ring *pring_s4 = NULL;
5599 struct lpfc_sli_ring *pring = NULL;
5600 int ret_val;
5601 unsigned long flags;
5602 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5603
5604 status = fc_block_rport(rport);
5605 if (status != 0 && status != SUCCESS)
5606 return status;
5607
5608 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5609 if (!lpfc_cmd)
5610 return ret;
5611
5612
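/* Guard against IO completion being called at the same time */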
5613 spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
5614
5615 spin_lock(&phba->hbalock);
5616
5617 if (phba->hba_flag & HBA_IOQ_FLUSH) {
5618 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5619 "3168 SCSI Layer abort requested I/O has been "
5620 "flushed by LLD.\n");
5621 ret = FAILED;
5622 goto out_unlock_hba;
5623 }
5624
5625 if (!lpfc_cmd->pCmd) {
5626 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5627 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5628 "x%x ID %d LUN %llu\n",
5629 SUCCESS, cmnd->device->id, cmnd->device->lun);
5630 goto out_unlock_hba;
5631 }
5632
5633 iocb = &lpfc_cmd->cur_iocbq;
5634 if (phba->sli_rev == LPFC_SLI_REV4) {
5635 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5636 if (!pring_s4) {
5637 ret = FAILED;
5638 goto out_unlock_hba;
5639 }
5640 spin_lock(&pring_s4->ring_lock);
5641 }
5642
5643 if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
5644 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5645 "3169 SCSI Layer abort requested I/O has been "
5646 "cancelled by LLD.\n");
5647 ret = FAILED;
5648 goto out_unlock_ring;
5649 }
5650
5651
5652
5653
5654
5655
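/*
 * If pCmd already points at a different SCSI command, the driver
 * completed this command while the midlayer was requesting the
 * abort; treat the abort as a success.
 */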
5656 if (lpfc_cmd->pCmd != cmnd) {
5657 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5658 "3170 SCSI Layer abort requested I/O has been "
5659 "completed by LLD.\n");
5660 goto out_unlock_ring;
5661 }
5662
5663 WARN_ON(iocb->io_buf != lpfc_cmd);
5664
5665
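/* abort issued in recovery is still in progress */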
5666 if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
5667 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5668 "3389 SCSI Layer I/O Abort Request is pending\n");
5669 if (phba->sli_rev == LPFC_SLI_REV4)
5670 spin_unlock(&pring_s4->ring_lock);
5671 spin_unlock(&phba->hbalock);
5672 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5673 goto wait_for_cmpl;
5674 }
5675
5676 lpfc_cmd->waitq = &waitq;
5677 if (phba->sli_rev == LPFC_SLI_REV4) {
5678 spin_unlock(&pring_s4->ring_lock);
5679 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5680 lpfc_sli_abort_fcp_cmpl);
5681 } else {
5682 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5683 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5684 lpfc_sli_abort_fcp_cmpl);
5685 }
5686
5687
5688 lpfc_issue_hb_tmo(phba);
5689
5690 if (ret_val != IOCB_SUCCESS) {
5691
5692 lpfc_cmd->waitq = NULL;
5693 ret = FAILED;
5694 goto out_unlock_hba;
5695 }
5696
5697
5698 spin_unlock(&phba->hbalock);
5699 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5700
5701 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5702 lpfc_sli_handle_fast_ring_event(phba,
5703 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5704
5705 wait_for_cmpl:
5706
5707
5708
5709
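/*
 * Wait for the aborted I/O to complete; the completion path clears
 * pCmd and wakes this waitqueue. The wait is bounded by twice the
 * devloss timeout.
 */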
5710 wait_event_timeout(waitq,
5711 (lpfc_cmd->pCmd != cmnd),
5712 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5713
5714 spin_lock(&lpfc_cmd->buf_lock);
5715
5716 if (lpfc_cmd->pCmd == cmnd) {
5717 ret = FAILED;
5718 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5719 "0748 abort handler timed out waiting "
5720 "for aborting I/O (xri:x%x) to complete: "
5721 "ret %#x, ID %d, LUN %llu\n",
5722 iocb->sli4_xritag, ret,
5723 cmnd->device->id, cmnd->device->lun);
5724 }
5725
5726 lpfc_cmd->waitq = NULL;
5727
5728 spin_unlock(&lpfc_cmd->buf_lock);
5729 goto out;
5730
5731 out_unlock_ring:
5732 if (phba->sli_rev == LPFC_SLI_REV4)
5733 spin_unlock(&pring_s4->ring_lock);
5734 out_unlock_hba:
5735 spin_unlock(&phba->hbalock);
5736 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5737 out:
5738 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5739 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5740 "LUN %llu\n", ret, cmnd->device->id,
5741 cmnd->device->lun);
5742 return ret;
5743 }
5744
5745 static char *
5746 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5747 {
5748 switch (task_mgmt_cmd) {
5749 case FCP_ABORT_TASK_SET:
5750 return "FCP_ABORT_TASK_SET";
5751 case FCP_CLEAR_TASK_SET:
5752 return "FCP_CLEAR_TASK_SET";
5753 case FCP_BUS_RESET:
5754 return "FCP_BUS_RESET";
5755 case FCP_LUN_RESET:
5756 return "FCP_LUN_RESET";
5757 case FCP_TARGET_RESET:
5758 return "FCP_TARGET_RESET";
5759 case FCP_CLEAR_ACA:
5760 return "FCP_CLEAR_ACA";
5761 case FCP_TERMINATE_TASK:
5762 return "FCP_TERMINATE_TASK";
5763 default:
5764 return "unknown";
5765 }
5766 }
5767
5768
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
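/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if the task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 *
 * This routine checks the FCP RSP INFO to see if the task management
 * command succeeded.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */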
5780 static int
5781 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5782 {
5783 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5784 uint32_t rsp_info;
5785 uint32_t rsp_len;
5786 uint8_t rsp_info_code;
5787 int ret = FAILED;
5788
5789
5790 if (fcprsp == NULL)
5791 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5792 "0703 fcp_rsp is missing\n");
5793 else {
5794 rsp_info = fcprsp->rspStatus2;
5795 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5796 rsp_info_code = fcprsp->rspInfo3;
5797
5798
5799 lpfc_printf_vlog(vport, KERN_INFO,
5800 LOG_FCP,
5801 "0706 fcp_rsp valid 0x%x,"
5802 " rsp len=%d code 0x%x\n",
5803 rsp_info,
5804 rsp_len, rsp_info_code);
5805
5806
5807
5808
5809
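/*
 * If the FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN field
 * specifies the number of valid bytes of FCP_RSP_INFO.
 * The FCP_RSP_LEN field would be 4 or 8 if FCP_RSP_LEN_VALID is one.
 */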
5810 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5811 ((rsp_len == 8) || (rsp_len == 4))) {
5812 switch (rsp_info_code) {
5813 case RSP_NO_FAILURE:
5814 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5815 "0715 Task Mgmt No Failure\n");
5816 ret = SUCCESS;
5817 break;
5818 case RSP_TM_NOT_SUPPORTED:
5819 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5820 "0716 Task Mgmt Target "
5821 "reject\n");
5822 break;
5823 case RSP_TM_NOT_COMPLETED:
5824 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5825 "0717 Task Mgmt Target "
5826 "failed TM\n");
5827 break;
5828 case RSP_TM_INVALID_LU:
5829 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5830 "0718 Task Mgmt to invalid "
5831 "LUN\n");
5832 break;
5833 }
5834 }
5835 }
5836 return ret;
5837 }
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
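/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rport: Pointer to remote port.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF.
 * @task_mgmt_cmd: type of TMF to send.
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   SUCCESS, FAILED or TIMEOUT_ERROR
 */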
5855 static int
5856 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
5857 unsigned int tgt_id, uint64_t lun_id,
5858 uint8_t task_mgmt_cmd)
5859 {
5860 struct lpfc_hba *phba = vport->phba;
5861 struct lpfc_io_buf *lpfc_cmd;
5862 struct lpfc_iocbq *iocbq;
5863 struct lpfc_iocbq *iocbqrsp;
5864 struct lpfc_rport_data *rdata;
5865 struct lpfc_nodelist *pnode;
5866 int ret;
5867 int status;
5868
5869 rdata = rport->dd_data;
5870 if (!rdata || !rdata->pnode)
5871 return FAILED;
5872 pnode = rdata->pnode;
5873
5874 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5875 if (lpfc_cmd == NULL)
5876 return FAILED;
5877 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5878 lpfc_cmd->rdata = rdata;
5879 lpfc_cmd->pCmd = NULL;
5880 lpfc_cmd->ndlp = pnode;
5881
5882 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5883 task_mgmt_cmd);
5884 if (!status) {
5885 lpfc_release_scsi_buf(phba, lpfc_cmd);
5886 return FAILED;
5887 }
5888
5889 iocbq = &lpfc_cmd->cur_iocbq;
5890 iocbqrsp = lpfc_sli_get_iocbq(phba);
5891 if (iocbqrsp == NULL) {
5892 lpfc_release_scsi_buf(phba, lpfc_cmd);
5893 return FAILED;
5894 }
5895 iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
5896 iocbq->vport = vport;
5897
5898 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5899 "0702 Issue %s to TGT %d LUN %llu "
5900 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5901 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5902 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5903 iocbq->cmd_flag);
5904
5905 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5906 iocbq, iocbqrsp, lpfc_cmd->timeout);
5907 if ((status != IOCB_SUCCESS) ||
5908 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
5909 if (status != IOCB_SUCCESS ||
5910 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5911 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5912 "0727 TMF %s to TGT %d LUN %llu "
5913 "failed (%d, %d) cmd_flag x%x\n",
5914 lpfc_taskmgmt_name(task_mgmt_cmd),
5915 tgt_id, lun_id,
5916 get_job_ulpstatus(phba, iocbqrsp),
5917 get_job_word4(phba, iocbqrsp),
5918 iocbq->cmd_flag);
5919
5920 if (status == IOCB_SUCCESS) {
5921 if (get_job_ulpstatus(phba, iocbqrsp) ==
5922 IOSTAT_FCP_RSP_ERROR)
5923
5924
5925 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5926 else
5927 ret = FAILED;
5928 } else if ((status == IOCB_TIMEDOUT) ||
5929 (status == IOCB_ABORTED)) {
5930 ret = TIMEOUT_ERROR;
5931 } else {
5932 ret = FAILED;
5933 }
5934 } else
5935 ret = SUCCESS;
5936
5937 lpfc_sli_release_iocbq(phba, iocbqrsp);
5938
5939 if (status != IOCB_TIMEDOUT)
5940 lpfc_release_scsi_buf(phba, lpfc_cmd);
5941
5942 return ret;
5943 }
5944
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955
5956
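/**
 * lpfc_chk_tgt_mapped - delay until the target rport is usable
 * @vport: The virtual port to check on.
 * @rport: Pointer to fc_rport data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */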
5957 static int
5958 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5959 {
5960 struct lpfc_rport_data *rdata;
5961 struct lpfc_nodelist *pnode = NULL;
5962 unsigned long later;
5963
5964 rdata = rport->dd_data;
5965 if (!rdata) {
5966 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5967 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5968 return FAILED;
5969 }
5970 pnode = rdata->pnode;
5971
5972
5973
5974
5975
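/*
 * If target is not in a MAPPED state, delay until
 * target is rediscovered or devloss timeout expires.
 */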
5976 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5977 while (time_after(later, jiffies)) {
5978 if (!pnode)
5979 return FAILED;
5980 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5981 return SUCCESS;
5982 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5983 rdata = rport->dd_data;
5984 if (!rdata)
5985 return FAILED;
5986 pnode = rdata->pnode;
5987 }
5988 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5989 return FAILED;
5990 return SUCCESS;
5991 }
5992
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
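/**
 * lpfc_reset_flush_io_context - flush orphaned I/O after a reset TMF
 * @vport: The virtual port (scsi_host) for the flush context.
 * @tgt_id: If aborting by Target context - specifies the target id.
 * @lun_id: If aborting by Lun context - specifies the lun id.
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */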
6009 static int
6010 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6011 uint64_t lun_id, lpfc_ctx_cmd context)
6012 {
6013 struct lpfc_hba *phba = vport->phba;
6014 unsigned long later;
6015 int cnt;
6016
6017 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6018 if (cnt)
6019 lpfc_sli_abort_taskmgmt(vport,
6020 &phba->sli.sli3_ring[LPFC_FCP_RING],
6021 tgt_id, lun_id, context);
6022 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6023 while (time_after(later, jiffies) && cnt) {
6024 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6025 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6026 }
6027 if (cnt) {
6028 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6029 "0724 I/O flush failure for context %s : cnt x%x\n",
6030 ((context == LPFC_CTX_LUN) ? "LUN" :
6031 ((context == LPFC_CTX_TGT) ? "TGT" :
6032 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6033 cnt);
6034 return FAILED;
6035 }
6036 return SUCCESS;
6037 }
6038
6039
6040
6041
6042
6043
6044
6045
6046
6047
6048
6049
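/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a FCP_LUN_RESET task
 * management command.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */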
6050 static int
6051 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
6052 {
6053 struct Scsi_Host *shost = cmnd->device->host;
6054 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6055 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6056 struct lpfc_rport_data *rdata;
6057 struct lpfc_nodelist *pnode;
6058 unsigned tgt_id = cmnd->device->id;
6059 uint64_t lun_id = cmnd->device->lun;
6060 struct lpfc_scsi_event_header scsi_event;
6061 int status;
6062 u32 logit = LOG_FCP;
6063
6064 if (!rport)
6065 return FAILED;
6066
6067 rdata = rport->dd_data;
6068 if (!rdata || !rdata->pnode) {
6069 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6070 "0798 Device Reset rdata failure: rdata x%px\n",
6071 rdata);
6072 return FAILED;
6073 }
6074 pnode = rdata->pnode;
6075 status = fc_block_rport(rport);
6076 if (status != 0 && status != SUCCESS)
6077 return status;
6078
6079 status = lpfc_chk_tgt_mapped(vport, rport);
6080 if (status == FAILED) {
6081 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6082 "0721 Device Reset rport failure: rdata x%px\n", rdata);
6083 return FAILED;
6084 }
6085
6086 scsi_event.event_type = FC_REG_SCSI_EVENT;
6087 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6088 scsi_event.lun = lun_id;
6089 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6090 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6091
6092 fc_host_post_vendor_event(shost, fc_get_event_number(),
6093 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6094
6095 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6096 FCP_LUN_RESET);
6097 if (status != SUCCESS)
6098 logit = LOG_TRACE_EVENT;
6099
6100 lpfc_printf_vlog(vport, KERN_ERR, logit,
6101 "0713 SCSI layer issued Device Reset (%d, %llu) "
6102 "return x%x\n", tgt_id, lun_id, status);
6103
6104
6105
6106
6107
6108
6109
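/*
 * We have to clean up i/o as: they may be orphaned by the TMF;
 * or if the TMF failed, they may be in an indeterminate state.
 * So, continue on.
 * We will report success if all the i/o aborts successfully.
 */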
6110 if (status == SUCCESS)
6111 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6112 LPFC_CTX_LUN);
6113
6114 return status;
6115 }
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
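/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a FCP_TARGET_RESET task
 * management command.
 *
 * Return code :
 *   SUCCESS, FAILED or FAST_IO_FAIL
 */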
6128 static int
6129 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6130 {
6131 struct Scsi_Host *shost = cmnd->device->host;
6132 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6133 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6134 struct lpfc_rport_data *rdata;
6135 struct lpfc_nodelist *pnode;
6136 unsigned tgt_id = cmnd->device->id;
6137 uint64_t lun_id = cmnd->device->lun;
6138 struct lpfc_scsi_event_header scsi_event;
6139 int status;
6140 u32 logit = LOG_FCP;
6141 u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6142 unsigned long flags;
6143 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6144
6145 if (!rport)
6146 return FAILED;
6147
6148 rdata = rport->dd_data;
6149 if (!rdata || !rdata->pnode) {
6150 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6151 "0799 Target Reset rdata failure: rdata x%px\n",
6152 rdata);
6153 return FAILED;
6154 }
6155 pnode = rdata->pnode;
6156 status = fc_block_rport(rport);
6157 if (status != 0 && status != SUCCESS)
6158 return status;
6159
6160 status = lpfc_chk_tgt_mapped(vport, rport);
6161 if (status == FAILED) {
6162 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6163 "0722 Target Reset rport failure: rdata x%px\n", rdata);
6164 if (pnode) {
6165 spin_lock_irqsave(&pnode->lock, flags);
6166 pnode->nlp_flag &= ~NLP_NPR_ADISC;
6167 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6168 spin_unlock_irqrestore(&pnode->lock, flags);
6169 }
6170 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6171 LPFC_CTX_TGT);
6172 return FAST_IO_FAIL;
6173 }
6174
6175 scsi_event.event_type = FC_REG_SCSI_EVENT;
6176 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6177 scsi_event.lun = 0;
6178 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6179 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6180
6181 fc_host_post_vendor_event(shost, fc_get_event_number(),
6182 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6183
6184 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6185 FCP_TARGET_RESET);
6186 if (status != SUCCESS) {
6187 logit = LOG_TRACE_EVENT;
6188
6189
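/* Issue LOGO, if no LOGO is outstanding */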
6190 spin_lock_irqsave(&pnode->lock, flags);
6191 if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6192 !pnode->logo_waitq) {
6193 pnode->logo_waitq = &waitq;
6194 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6195 pnode->nlp_flag |= NLP_ISSUE_LOGO;
6196 pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6197 spin_unlock_irqrestore(&pnode->lock, flags);
6198 lpfc_unreg_rpi(vport, pnode);
6199 wait_event_timeout(waitq,
6200 (!(pnode->save_flags &
6201 NLP_WAIT_FOR_LOGO)),
6202 msecs_to_jiffies(dev_loss_tmo *
6203 1000));
6204
6205 if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6206 lpfc_printf_vlog(vport, KERN_ERR, logit,
6207 "0725 SCSI layer TGTRST "
6208 "failed & LOGO TMO (%d, %llu) "
6209 "return x%x\n",
6210 tgt_id, lun_id, status);
6211 spin_lock_irqsave(&pnode->lock, flags);
6212 pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6213 } else {
6214 spin_lock_irqsave(&pnode->lock, flags);
6215 }
6216 pnode->logo_waitq = NULL;
6217 spin_unlock_irqrestore(&pnode->lock, flags);
6218 status = SUCCESS;
6219
6220 } else {
6221 spin_unlock_irqrestore(&pnode->lock, flags);
6222 status = FAILED;
6223 }
6224 }
6225
6226 lpfc_printf_vlog(vport, KERN_ERR, logit,
6227 "0723 SCSI layer issued Target Reset (%d, %llu) "
6228 "return x%x\n", tgt_id, lun_id, status);
6229
6230
6231
6232
6233
6234
6235
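/*
 * We have to clean up i/o as: they may be orphaned by the TMF;
 * or if the TMF failed, they may be in an indeterminate state.
 * So, continue on.
 * We will report success if all the i/o aborts successfully.
 */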
6236 if (status == SUCCESS)
6237 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6238 LPFC_CTX_TGT);
6239 return status;
6240 }
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
6254
6255
6256
6257
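/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a host reset of the adapter port. It brings the HBA
 * offline, performs a board restart, and then brings the board back online.
 *
 * Return code :
 *   FAILED - Error
 *   SUCCESS - Success
 */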
6258 static int
6259 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6260 {
6261 struct Scsi_Host *shost = cmnd->device->host;
6262 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6263 struct lpfc_hba *phba = vport->phba;
6264 int rc, ret = SUCCESS;
6265
6266 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6267 "3172 SCSI layer issued Host Reset Data:\n");
6268
6269 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6270 lpfc_offline(phba);
6271 rc = lpfc_sli_brdrestart(phba);
6272 if (rc)
6273 goto error;
6274
6275
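/* Wait for successful restart of adapter */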
6276 if (phba->sli_rev < LPFC_SLI_REV4) {
6277 rc = lpfc_sli_chipset_init(phba);
6278 if (rc)
6279 goto error;
6280 }
6281
6282 rc = lpfc_online(phba);
6283 if (rc)
6284 goto error;
6285
6286 lpfc_unblock_mgmt_io(phba);
6287
6288 return ret;
6289 error:
6290 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6291 "3323 Failed host reset\n");
6292 lpfc_unblock_mgmt_io(phba);
6293 return FAILED;
6294 }
6295
6296
6297
6298
6299
6300
6301
6302
6303
6304
6305
6306
6307
6308
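/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * For SLI-3, this routine populates lun_queue_depth + 2 scsi_bufs into this
 * host's globally available list of scsi buffers, while making sure no more
 * buffers are allocated than the HBA limit conveyed to the midlayer. This
 * list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 */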
6309 static int
6310 lpfc_slave_alloc(struct scsi_device *sdev)
6311 {
6312 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6313 struct lpfc_hba *phba = vport->phba;
6314 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6315 uint32_t total = 0;
6316 uint32_t num_to_alloc = 0;
6317 int num_allocated = 0;
6318 uint32_t sdev_cnt;
6319 struct lpfc_device_data *device_data;
6320 unsigned long flags;
6321 struct lpfc_name target_wwpn;
6322
6323 if (!rport || fc_remote_port_chkready(rport))
6324 return -ENXIO;
6325
6326 if (phba->cfg_fof) {
6327
6328
6329
6330
6331
6332
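/*
 * Check to see if the device data structure for the lun
 * exists. If not, create one.
 */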
6333 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6334 spin_lock_irqsave(&phba->devicelock, flags);
6335 device_data = __lpfc_get_device_data(phba,
6336 &phba->luns,
6337 &vport->fc_portname,
6338 &target_wwpn,
6339 sdev->lun);
6340 if (!device_data) {
6341 spin_unlock_irqrestore(&phba->devicelock, flags);
6342 device_data = lpfc_create_device_data(phba,
6343 &vport->fc_portname,
6344 &target_wwpn,
6345 sdev->lun,
6346 phba->cfg_XLanePriority,
6347 true);
6348 if (!device_data)
6349 return -ENOMEM;
6350 spin_lock_irqsave(&phba->devicelock, flags);
6351 list_add_tail(&device_data->listentry, &phba->luns);
6352 }
6353 device_data->rport_data = rport->dd_data;
6354 device_data->available = true;
6355 spin_unlock_irqrestore(&phba->devicelock, flags);
6356 sdev->hostdata = device_data;
6357 } else {
6358 sdev->hostdata = rport->dd_data;
6359 }
6360 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6361
6362
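/* For SLI4, all IO buffers are pre-allocated */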
6363 if (phba->sli_rev == LPFC_SLI_REV4)
6364 return 0;
6365
6366
6367
6368
6369
6370
6371
6372
6373
6374
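/*
 * Populate the lun_queue_depth count of scsi_bufs into this host's
 * globally available list of scsi buffers. Don't allocate more than
 * the HBA limit conveyed to the midlayer via the host structure. The
 * formula accounts for the lun_queue_depth + error handlers + 1 extra.
 * This list of scsi bufs exists for the lifetime of the driver.
 */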
6375 total = phba->total_scsi_bufs;
6376 num_to_alloc = vport->cfg_lun_queue_depth + 2;
6377
6378
6379 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6380 return 0;
6381
6382
6383 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6384 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6385 "0704 At limitation of %d preallocated "
6386 "command buffers\n", total);
6387 return 0;
6388
6389 } else if (total + num_to_alloc >
6390 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6391 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6392 "0705 Allocation request of %d "
6393 "command buffers will exceed max of %d. "
6394 "Reducing allocation request to %d.\n",
6395 num_to_alloc, phba->cfg_hba_queue_depth,
6396 (phba->cfg_hba_queue_depth - total));
6397 num_to_alloc = phba->cfg_hba_queue_depth - total;
6398 }
6399 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6400 if (num_to_alloc != num_allocated) {
6401 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6402 "0708 Allocation request of %d "
6403 "command buffers did not succeed. "
6404 "Allocated %d buffers.\n",
6405 num_to_alloc, num_allocated);
6406 }
6407 if (num_allocated > 0)
6408 phba->total_scsi_bufs += num_allocated;
6409 return 0;
6410 }
6411
6412
6413
6414
6415
6416
6417
6418
6419
6420
6421
6422
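/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - LUN queue depth for @sdev.
 *   - SLI polling of the fcp ring if the ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 */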
6423 static int
6424 lpfc_slave_configure(struct scsi_device *sdev)
6425 {
6426 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6427 struct lpfc_hba *phba = vport->phba;
6428
6429 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6430
6431 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6432 lpfc_sli_handle_fast_ring_event(phba,
6433 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6434 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6435 lpfc_poll_rearm_timer(phba);
6436 }
6437
6438 return 0;
6439 }
6440
6441
6442
6443
6444
6445
6446
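/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine releases any OAS device data for @sdev and sets the @sdev
 * hostdata field to null.
 */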
6447 static void
6448 lpfc_slave_destroy(struct scsi_device *sdev)
6449 {
6450 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6451 struct lpfc_hba *phba = vport->phba;
6452 unsigned long flags;
6453 struct lpfc_device_data *device_data = sdev->hostdata;
6454
6455 atomic_dec(&phba->sdev_cnt);
6456 if ((phba->cfg_fof) && (device_data)) {
6457 spin_lock_irqsave(&phba->devicelock, flags);
6458 device_data->available = false;
6459 if (!device_data->oas_enabled)
6460 lpfc_delete_device_data(phba, device_data);
6461 spin_unlock_irqrestore(&phba->devicelock, flags);
6462 }
6463 sdev->hostdata = NULL;
6464 return;
6465 }
6466
6467
6468
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481
6482
6483
6484
6485
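/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: Lun on target.
 * @pri: Priority.
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *                 GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which contains identifying
 * information for the device (host wwpn, target wwpn, lun), the state of
 * OAS, whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 */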
6486 struct lpfc_device_data*
6487 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6488 struct lpfc_name *target_wwpn, uint64_t lun,
6489 uint32_t pri, bool atomic_create)
6490 {
6491
6492 struct lpfc_device_data *lun_info;
6493 int memory_flags;
6494
6495 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6496 !(phba->cfg_fof))
6497 return NULL;
6498
6499
6500
6501 if (atomic_create)
6502 memory_flags = GFP_ATOMIC;
6503 else
6504 memory_flags = GFP_KERNEL;
6505 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6506 if (!lun_info)
6507 return NULL;
6508 INIT_LIST_HEAD(&lun_info->listentry);
6509 lun_info->rport_data = NULL;
6510 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6511 sizeof(struct lpfc_name));
6512 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6513 sizeof(struct lpfc_name));
6514 lun_info->device_id.lun = lun;
6515 lun_info->oas_enabled = false;
6516 lun_info->priority = pri;
6517 lun_info->available = false;
6518 return lun_info;
6519 }
6520
6521
6522
6523
6524
6525
6526
6527
6528
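/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine removes the device data from its list, if present, and
 * frees the previously allocated device data structure.
 */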
6529 void
6530 lpfc_delete_device_data(struct lpfc_hba *phba,
6531 struct lpfc_device_data *lun_info)
6532 {
6533
6534 if (unlikely(!phba) || !lun_info ||
6535 !(phba->cfg_fof))
6536 return;
6537
6538 if (!list_empty(&lun_info->listentry))
6539 list_del(&lun_info->listentry);
6540 mempool_free(lun_info, phba->device_data_mem_pool);
6541 return;
6542 }
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
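/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: Lun on target.
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks; it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 */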
6560 struct lpfc_device_data*
6561 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6562 struct lpfc_name *vport_wwpn,
6563 struct lpfc_name *target_wwpn, uint64_t lun)
6564 {
6565
6566 struct lpfc_device_data *lun_info;
6567
6568 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6569 !phba->cfg_fof)
6570 return NULL;
6571
6572
6573
6574 list_for_each_entry(lun_info, list, listentry) {
6575 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6576 sizeof(struct lpfc_name)) == 0) &&
6577 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6578 sizeof(struct lpfc_name)) == 0) &&
6579 (lun_info->device_id.lun == lun))
6580 return lun_info;
6581 }
6582
6583 return NULL;
6584 }
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605
6606
6607
6608
6609
6610
6611
6612
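/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @starting_lun: Pointer to the lun to start searching from.
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information.
 * @found_target_wwpn: Pointer to the found lun's target wwpn information.
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to the status of the found lun.
 * @found_lun_pri: Pointer to the priority of the found lun.
 *
 * This routine searches the luns list for the specified lun or the first
 * OAS-enabled lun for the vport/target. A zero vport or target wwpn matches
 * any vport or target. If the lun is found, its identity, status, and
 * priority are returned, and @starting_lun is advanced to the next OAS lun
 * on the list, or NO_MORE_OAS_LUN if none remains.
 *
 * Return codes:
 *   true - lun found
 *   false - lun not found
 */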
6613 bool
6614 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6615 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6616 struct lpfc_name *found_vport_wwpn,
6617 struct lpfc_name *found_target_wwpn,
6618 uint64_t *found_lun,
6619 uint32_t *found_lun_status,
6620 uint32_t *found_lun_pri)
6621 {
6622
6623 unsigned long flags;
6624 struct lpfc_device_data *lun_info;
6625 struct lpfc_device_id *device_id;
6626 uint64_t lun;
6627 bool found = false;
6628
6629 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6630 !starting_lun || !found_vport_wwpn ||
6631 !found_target_wwpn || !found_lun || !found_lun_status ||
6632 (*starting_lun == NO_MORE_OAS_LUN) ||
6633 !phba->cfg_fof)
6634 return false;
6635
6636 lun = *starting_lun;
6637 *found_lun = NO_MORE_OAS_LUN;
6638 *starting_lun = NO_MORE_OAS_LUN;
6639
6640
6641
6642 spin_lock_irqsave(&phba->devicelock, flags);
6643 list_for_each_entry(lun_info, &phba->luns, listentry) {
6644 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6645 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6646 sizeof(struct lpfc_name)) == 0)) &&
6647 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6648 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6649 sizeof(struct lpfc_name)) == 0)) &&
6650 (lun_info->oas_enabled)) {
6651 device_id = &lun_info->device_id;
6652 if ((!found) &&
6653 ((lun == FIND_FIRST_OAS_LUN) ||
6654 (device_id->lun == lun))) {
6655 *found_lun = device_id->lun;
6656 memcpy(found_vport_wwpn,
6657 &device_id->vport_wwpn,
6658 sizeof(struct lpfc_name));
6659 memcpy(found_target_wwpn,
6660 &device_id->target_wwpn,
6661 sizeof(struct lpfc_name));
6662 if (lun_info->available)
6663 *found_lun_status =
6664 OAS_LUN_STATUS_EXISTS;
6665 else
6666 *found_lun_status = 0;
6667 *found_lun_pri = lun_info->priority;
6668 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6669 memset(vport_wwpn, 0x0,
6670 sizeof(struct lpfc_name));
6671 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6672 memset(target_wwpn, 0x0,
6673 sizeof(struct lpfc_name));
6674 found = true;
6675 } else if (found) {
6676 *starting_lun = device_id->lun;
6677 memcpy(vport_wwpn, &device_id->vport_wwpn,
6678 sizeof(struct lpfc_name));
6679 memcpy(target_wwpn, &device_id->target_wwpn,
6680 sizeof(struct lpfc_name));
6681 break;
6682 }
6683 }
6684 }
6685 spin_unlock_irqrestore(&phba->devicelock, flags);
6686 return found;
6687 }
6688
6689
6690
6691
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
6703
6704
6705
6706
6707
6708
6709
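/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: Lun.
 * @pri: Priority.
 *
 * This routine enables a lun for oas operations. It does so by:
 *
 *   1) Checking whether the device data for the lun has been created.
 *   2) If found, setting the OAS enabled flag (if not set) and returning.
 *   3) Otherwise, creating a device data structure.
 *   4) If successfully created, marking the device data as an OAS lun,
 *      marking the lun as not available, and adding it to the list of luns.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 */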
6710 bool
6711 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6712 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6713 {
6714
6715 struct lpfc_device_data *lun_info;
6716 unsigned long flags;
6717
6718 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6719 !phba->cfg_fof)
6720 return false;
6721
6722 spin_lock_irqsave(&phba->devicelock, flags);
6723
6724
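/* Check to see if the device data for the lun has been created */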
6725 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6726 target_wwpn, lun);
6727 if (lun_info) {
6728 if (!lun_info->oas_enabled)
6729 lun_info->oas_enabled = true;
6730 lun_info->priority = pri;
6731 spin_unlock_irqrestore(&phba->devicelock, flags);
6732 return true;
6733 }
6734
6735
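/* Create a lun info structure and add it to the list of luns */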
6736 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6737 pri, true);
6738 if (lun_info) {
6739 lun_info->oas_enabled = true;
6740 lun_info->priority = pri;
6741 lun_info->available = false;
6742 list_add_tail(&lun_info->listentry, &phba->luns);
6743 spin_unlock_irqrestore(&phba->devicelock, flags);
6744 return true;
6745 }
6746 spin_unlock_irqrestore(&phba->devicelock, flags);
6747 return false;
6748 }
6749
6750
6751
6752
6753
6754
6755
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
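/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: Lun.
 * @pri: Priority.
 *
 * This routine disables a lun for oas operations. It does so by:
 *
 *   1) Checking whether the device data for the lun exists.
 *   2) If present, clearing the OAS enabled flag and removing the device
 *      data from the list of device data structures if the lun is not
 *      available.
 *   3) Otherwise, returning false.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 */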
6770 bool
6771 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6772 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6773 {
6774
6775 struct lpfc_device_data *lun_info;
6776 unsigned long flags;
6777
6778 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6779 !phba->cfg_fof)
6780 return false;
6781
6782 spin_lock_irqsave(&phba->devicelock, flags);
6783
6784
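/* Check to see if the lun is available. */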
6785 lun_info = __lpfc_get_device_data(phba,
6786 &phba->luns, vport_wwpn,
6787 target_wwpn, lun);
6788 if (lun_info) {
6789 lun_info->oas_enabled = false;
6790 lun_info->priority = pri;
6791 if (!lun_info->available)
6792 lpfc_delete_device_data(phba, lun_info);
6793 spin_unlock_irqrestore(&phba->devicelock, flags);
6794 return true;
6795 }
6796
6797 spin_unlock_irqrestore(&phba->devicelock, flags);
6798 return false;
6799 }
6800
6801 static int
6802 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6803 {
6804 return SCSI_MLQUEUE_HOST_BUSY;
6805 }
6806
6807 static int
6808 lpfc_no_slave(struct scsi_device *sdev)
6809 {
6810 return -ENODEV;
6811 }
6812
6813 struct scsi_host_template lpfc_template_nvme = {
6814 .module = THIS_MODULE,
6815 .name = LPFC_DRIVER_NAME,
6816 .proc_name = LPFC_DRIVER_NAME,
6817 .info = lpfc_info,
6818 .queuecommand = lpfc_no_command,
6819 .slave_alloc = lpfc_no_slave,
6820 .slave_configure = lpfc_no_slave,
6821 .scan_finished = lpfc_scan_finished,
6822 .this_id = -1,
6823 .sg_tablesize = 1,
6824 .cmd_per_lun = 1,
6825 .shost_groups = lpfc_hba_groups,
6826 .max_sectors = 0xFFFFFFFF,
6827 .vendor_id = LPFC_NL_VENDOR_ID,
6828 .track_queue_depth = 0,
6829 };
6830
6831 struct scsi_host_template lpfc_template = {
6832 .module = THIS_MODULE,
6833 .name = LPFC_DRIVER_NAME,
6834 .proc_name = LPFC_DRIVER_NAME,
6835 .info = lpfc_info,
6836 .queuecommand = lpfc_queuecommand,
6837 .eh_timed_out = fc_eh_timed_out,
6838 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
6839 .eh_abort_handler = lpfc_abort_handler,
6840 .eh_device_reset_handler = lpfc_device_reset_handler,
6841 .eh_target_reset_handler = lpfc_target_reset_handler,
6842 .eh_host_reset_handler = lpfc_host_reset_handler,
6843 .slave_alloc = lpfc_slave_alloc,
6844 .slave_configure = lpfc_slave_configure,
6845 .slave_destroy = lpfc_slave_destroy,
6846 .scan_finished = lpfc_scan_finished,
6847 .this_id = -1,
6848 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6849 .cmd_per_lun = LPFC_CMD_PER_LUN,
6850 .shost_groups = lpfc_hba_groups,
6851 .max_sectors = 0xFFFFFFFF,
6852 .vendor_id = LPFC_NL_VENDOR_ID,
6853 .change_queue_depth = scsi_change_queue_depth,
6854 .track_queue_depth = 1,
6855 };