0001
0002
0003
0004
0005
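/*
 * fnic_scsi.c: SCSI I/O path of the Cisco FCoE HBA (fnic) driver.
 *
 * Queues SCSI commands and task-management requests to the firmware
 * copy work queue and handles the corresponding completions, aborts,
 * device resets and post-reset cleanup.
 */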
0006 #include <linux/mempool.h>
0007 #include <linux/errno.h>
0008 #include <linux/init.h>
0009 #include <linux/workqueue.h>
0010 #include <linux/pci.h>
0011 #include <linux/scatterlist.h>
0012 #include <linux/skbuff.h>
0013 #include <linux/spinlock.h>
0014 #include <linux/etherdevice.h>
0015 #include <linux/if_ether.h>
0016 #include <linux/if_vlan.h>
0017 #include <linux/delay.h>
0018 #include <linux/gfp.h>
0019 #include <scsi/scsi.h>
0020 #include <scsi/scsi_host.h>
0021 #include <scsi/scsi_device.h>
0022 #include <scsi/scsi_cmnd.h>
0023 #include <scsi/scsi_tcq.h>
0024 #include <scsi/fc/fc_els.h>
0025 #include <scsi/fc/fc_fcoe.h>
0026 #include <scsi/libfc.h>
0027 #include <scsi/fc_frame.h>
0028 #include "fnic_io.h"
0029 #include "fnic.h"
0030
0031 const char *fnic_state_str[] = {
0032 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
0033 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
0034 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
0035 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
0036 };
0037
0038 static const char *fnic_ioreq_state_str[] = {
0039 [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
0040 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
0041 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
0042 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
0043 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
0044 };
0045
0046 static const char *fcpio_status_str[] = {
0047 [FCPIO_SUCCESS] = "FCPIO_SUCCESS",
0048 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
0049 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
0050 [FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
0051 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
0052 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
0053 [FCPIO_ABORTED] = "FCPIO_ABORTED",
0054 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
0055 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
0056 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
0057 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
0058 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
0059 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
0060 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
0061 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
0062 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
0063 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
0064 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
0065 [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
0066 };
0067
0068 const char *fnic_state_to_str(unsigned int state)
0069 {
0070 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
0071 return "unknown";
0072
0073 return fnic_state_str[state];
0074 }
0075
0076 static const char *fnic_ioreq_state_to_str(unsigned int state)
0077 {
0078 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
0079 !fnic_ioreq_state_str[state])
0080 return "unknown";
0081
0082 return fnic_ioreq_state_str[state];
0083 }
0084
0085 static const char *fnic_fcpio_status_to_str(unsigned int status)
0086 {
0087 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
0088 return "unknown";
0089
0090 return fcpio_status_str[status];
0091 }
0092
0093 static void fnic_cleanup_io(struct fnic *fnic);
0094
0095 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
0096 struct scsi_cmnd *sc)
0097 {
0098 u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
0099
0100 return &fnic->io_req_lock[hash];
0101 }
0102
0103 static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
0104 int tag)
0105 {
0106 return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
0107 }
0108
0109
0110
0111
0112
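/*
 * fnic_release_ioreq_buf() - release the DMA resources held by an I/O:
 * unmap the driver-built SGL (if any) and the sense buffer, undo the
 * scsi_dma_map() of the data buffer, and return the SGL allocation to
 * its mempool.
 */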
0113 static void fnic_release_ioreq_buf(struct fnic *fnic,
0114 struct fnic_io_req *io_req,
0115 struct scsi_cmnd *sc)
0116 {
0117 if (io_req->sgl_list_pa)
0118 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
0119 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
0120 DMA_TO_DEVICE);
0121 scsi_dma_unmap(sc);
0122
0123 if (io_req->sgl_cnt)
0124 mempool_free(io_req->sgl_list_alloc,
0125 fnic->io_sgl_pool[io_req->sgl_type]);
0126 if (io_req->sense_buf_pa)
0127 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
0128 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
0129 }
0130
0131
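/*
 * free_wq_copy_descs() - reclaim copy WQ descriptors acknowledged by the
 * firmware. fw_ack_index is the last descriptor index the firmware has
 * ACKed; the number of reclaimed slots depends on whether the ACK wrapped
 * around the ring relative to to_clean_index. Returns 0 if descriptors
 * were reclaimed, 1 if no new ACK has been received.
 */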
0132 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
0133 {
0134
0135 if (!fnic->fw_ack_recd[0])
0136 return 1;
0137
0138
0139
0140
0141
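/*
 * Example (no wrap): to_clean_index = 3, fw_ack_index = 7 frees slots
 * 3..7, i.e. 7 - 3 + 1 = 5 descriptors. If the ACK wrapped around the
 * end of the ring, the freed range runs from to_clean_index to the end
 * of the ring and then from slot 0 up to fw_ack_index.
 */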
0142 if (wq->to_clean_index <= fnic->fw_ack_index[0])
0143 wq->ring.desc_avail += (fnic->fw_ack_index[0]
0144 - wq->to_clean_index + 1);
0145 else
0146 wq->ring.desc_avail += (wq->ring.desc_count
0147 - wq->to_clean_index
0148 + fnic->fw_ack_index[0] + 1);
0149
0150
0151
0152
0153
0154
0155 wq->to_clean_index =
0156 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
0157
0158
0159 fnic->fw_ack_recd[0] = 0;
0160 return 0;
0161 }
0162
0163
0164
0165
0166
0167
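/*
 * __fnic_set_state_flags() - set or clear bits in fnic->state_flags.
 * Both fnic_lock and the SCSI host lock are held so the flags stay
 * stable with respect to the I/O submission and completion paths.
 */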
0168 void
0169 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
0170 unsigned long clearbits)
0171 {
0172 unsigned long flags = 0;
0173 unsigned long host_lock_flags = 0;
0174
0175 spin_lock_irqsave(&fnic->fnic_lock, flags);
0176 spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
0177
0178 if (clearbits)
0179 fnic->state_flags &= ~st_flags;
0180 else
0181 fnic->state_flags |= st_flags;
0182
0183 spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
0184 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0185
0186 return;
0187 }
0188
0189
0190
0191
0192
0193
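/*
 * fnic_fw_reset_handler() - issue a firmware reset. Queued FC frames are
 * purged, in-flight submissions are allowed to drain, and a reset
 * descriptor is posted on the copy WQ. Returns 0 on success or -EAGAIN
 * if no descriptor is available.
 */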
0194 int fnic_fw_reset_handler(struct fnic *fnic)
0195 {
0196 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
0197 int ret = 0;
0198 unsigned long flags;
0199
0200
0201 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
0202
0203 skb_queue_purge(&fnic->frame_queue);
0204 skb_queue_purge(&fnic->tx_queue);
0205
0206
0207 while (atomic_read(&fnic->in_flight))
0208 schedule_timeout(msecs_to_jiffies(1));
0209
0210 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
0211
0212 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
0213 free_wq_copy_descs(fnic, wq);
0214
0215 if (!vnic_wq_copy_desc_avail(wq))
0216 ret = -EAGAIN;
0217 else {
0218 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
0219 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
0220 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
0221 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
0222 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
0223 atomic64_read(
0224 &fnic->fnic_stats.fw_stats.active_fw_reqs));
0225 }
0226
0227 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
0228
0229 if (!ret) {
0230 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
0231 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0232 "Issued fw reset\n");
0233 } else {
0234 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
0235 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0236 "Failed to issue fw reset\n");
0237 }
0238
0239 return ret;
0240 }
0241
0242
0243
0244
0245
0246
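/*
 * fnic_flogi_reg_handler() - register the FLOGI-assigned FC_ID and the
 * gateway MAC with the firmware. A FIP registration descriptor is used
 * when the adapter is FIP capable and a gateway MAC has been learned
 * (map_dest clear); otherwise a plain FLOGI register descriptor is
 * queued. Returns 0 on success or -EAGAIN if the copy WQ is full.
 */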
0247 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
0248 {
0249 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
0250 enum fcpio_flogi_reg_format_type format;
0251 struct fc_lport *lp = fnic->lport;
0252 u8 gw_mac[ETH_ALEN];
0253 int ret = 0;
0254 unsigned long flags;
0255
0256 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
0257
0258 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
0259 free_wq_copy_descs(fnic, wq);
0260
0261 if (!vnic_wq_copy_desc_avail(wq)) {
0262 ret = -EAGAIN;
0263 goto flogi_reg_ioreq_end;
0264 }
0265
0266 if (fnic->ctlr.map_dest) {
0267 eth_broadcast_addr(gw_mac);
0268 format = FCPIO_FLOGI_REG_DEF_DEST;
0269 } else {
0270 memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
0271 format = FCPIO_FLOGI_REG_GW_DEST;
0272 }
0273
0274 if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
0275 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
0276 fc_id, gw_mac,
0277 fnic->data_src_addr,
0278 lp->r_a_tov, lp->e_d_tov);
0279 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0280 "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
0281 fc_id, fnic->data_src_addr, gw_mac);
0282 } else {
0283 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
0284 format, fc_id, gw_mac);
0285 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0286 "FLOGI reg issued fcid %x map %d dest %pM\n",
0287 fc_id, fnic->ctlr.map_dest, gw_mac);
0288 }
0289
0290 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
0291 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
0292 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
0293 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
0294 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
0295
0296 flogi_reg_ioreq_end:
0297 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
0298 return ret;
0299 }
0300
0301
0302
0303
0304
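/*
 * fnic_queue_wq_copy_desc() - build the hardware SGL for a command,
 * DMA-map the SGL and sense buffer, and post an ICMND descriptor on the
 * copy WQ. Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY if a mapping
 * fails or no descriptor is available.
 */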
0305 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
0306 struct vnic_wq_copy *wq,
0307 struct fnic_io_req *io_req,
0308 struct scsi_cmnd *sc,
0309 int sg_count)
0310 {
0311 struct scatterlist *sg;
0312 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
0313 struct fc_rport_libfc_priv *rp = rport->dd_data;
0314 struct host_sg_desc *desc;
0315 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
0316 unsigned int i;
0317 unsigned long intr_flags;
0318 int flags;
0319 u8 exch_flags;
0320 struct scsi_lun fc_lun;
0321
0322 if (sg_count) {
0323
0324 desc = io_req->sgl_list;
0325 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
0326 desc->addr = cpu_to_le64(sg_dma_address(sg));
0327 desc->len = cpu_to_le32(sg_dma_len(sg));
0328 desc->_resvd = 0;
0329 desc++;
0330 }
0331
0332 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
0333 io_req->sgl_list,
0334 sizeof(io_req->sgl_list[0]) * sg_count,
0335 DMA_TO_DEVICE);
0336 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
0337 printk(KERN_ERR "DMA mapping failed\n");
0338 return SCSI_MLQUEUE_HOST_BUSY;
0339 }
0340 }
0341
0342 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
0343 sc->sense_buffer,
0344 SCSI_SENSE_BUFFERSIZE,
0345 DMA_FROM_DEVICE);
0346 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
0347 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
0348 sizeof(io_req->sgl_list[0]) * sg_count,
0349 DMA_TO_DEVICE);
0350 printk(KERN_ERR "DMA mapping failed\n");
0351 return SCSI_MLQUEUE_HOST_BUSY;
0352 }
0353
0354 int_to_scsilun(sc->device->lun, &fc_lun);
0355
0356
0357 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
0358
0359 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
0360 free_wq_copy_descs(fnic, wq);
0361
0362 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
0363 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
0364 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
0365 "fnic_queue_wq_copy_desc failure - no descriptors\n");
0366 atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
0367 return SCSI_MLQUEUE_HOST_BUSY;
0368 }
0369
0370 flags = 0;
0371 if (sc->sc_data_direction == DMA_FROM_DEVICE)
0372 flags = FCPIO_ICMND_RDDATA;
0373 else if (sc->sc_data_direction == DMA_TO_DEVICE)
0374 flags = FCPIO_ICMND_WRDATA;
0375
0376 exch_flags = 0;
0377 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
0378 (rp->flags & FC_RP_FLAGS_RETRY))
0379 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
0380
0381 fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
0382 0, exch_flags, io_req->sgl_cnt,
0383 SCSI_SENSE_BUFFERSIZE,
0384 io_req->sgl_list_pa,
0385 io_req->sense_buf_pa,
0386 0,
0387 FCPIO_ICMND_PTA_SIMPLE,
0388
0389 flags,
0390 sc->cmnd, sc->cmd_len,
0391 scsi_bufflen(sc),
0392 fc_lun.scsi_lun, io_req->port_id,
0393 rport->maxframe_size, rp->r_a_tov,
0394 rp->e_d_tov);
0395
0396 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
0397 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
0398 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
0399 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
0400 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
0401
0402 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
0403 return 0;
0404 }
0405
0406
0407
0408
0409
0410
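/*
 * fnic_queuecommand_lck() - queuecommand entry point, invoked through
 * DEF_SCSI_QCMD with the SCSI host lock held. Validates rport and link
 * state, allocates and fills a fnic_io_req, DMA-maps the data buffer and
 * hands the command to the firmware. Per-I/O state is protected by a
 * hashed io_lock; the host lock is dropped for the duration of the
 * submission and re-acquired before returning.
 */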
0411 static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
0412 {
0413 void (*done)(struct scsi_cmnd *) = scsi_done;
0414 const int tag = scsi_cmd_to_rq(sc)->tag;
0415 struct fc_lport *lp = shost_priv(sc->device->host);
0416 struct fc_rport *rport;
0417 struct fnic_io_req *io_req = NULL;
0418 struct fnic *fnic = lport_priv(lp);
0419 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
0420 struct vnic_wq_copy *wq;
0421 int ret;
0422 u64 cmd_trace;
0423 int sg_count = 0;
0424 unsigned long flags = 0;
0425 unsigned long ptr;
0426 spinlock_t *io_lock = NULL;
0427 int io_lock_acquired = 0;
0428 struct fc_rport_libfc_priv *rp;
0429
0430 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
0431 return SCSI_MLQUEUE_HOST_BUSY;
0432
0433 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
0434 return SCSI_MLQUEUE_HOST_BUSY;
0435
0436 rport = starget_to_rport(scsi_target(sc->device));
0437 if (!rport) {
0438 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0439 "returning DID_NO_CONNECT for IO as rport is NULL\n");
0440 sc->result = DID_NO_CONNECT << 16;
0441 done(sc);
0442 return 0;
0443 }
0444
0445 ret = fc_remote_port_chkready(rport);
0446 if (ret) {
0447 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0448 "rport is not ready\n");
0449 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
0450 sc->result = ret;
0451 done(sc);
0452 return 0;
0453 }
0454
0455 rp = rport->dd_data;
0456 if (!rp || rp->rp_state == RPORT_ST_DELETE) {
0457 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0458 "rport 0x%x removed, returning DID_NO_CONNECT\n",
0459 rport->port_id);
0460
0461 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
0462 sc->result = DID_NO_CONNECT<<16;
0463 done(sc);
0464 return 0;
0465 }
0466
0467 if (rp->rp_state != RPORT_ST_READY) {
0468 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0469 "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
0470 rport->port_id, rp->rp_state);
0471
0472 sc->result = DID_IMM_RETRY << 16;
0473 done(sc);
0474 return 0;
0475 }
0476
0477 if (lp->state != LPORT_ST_READY || !(lp->link_up))
0478 return SCSI_MLQUEUE_HOST_BUSY;
0479
0480 atomic_inc(&fnic->in_flight);
0481
0482
0483
0484
0485
0486
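/*
 * Release the SCSI host lock; from here on the driver uses its own
 * per-I/O locks. The host lock is re-acquired before returning to the
 * midlayer.
 */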
0487 spin_unlock(lp->host->host_lock);
0488 fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
0489 fnic_priv(sc)->flags = FNIC_NO_FLAGS;
0490
0491
0492 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
0493 if (!io_req) {
0494 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
0495 ret = SCSI_MLQUEUE_HOST_BUSY;
0496 goto out;
0497 }
0498 memset(io_req, 0, sizeof(*io_req));
0499
0500
0501 sg_count = scsi_dma_map(sc);
0502 if (sg_count < 0) {
0503 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
0504 tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
0505 mempool_free(io_req, fnic->io_req_pool);
0506 goto out;
0507 }
0508
0509
0510 io_req->sgl_cnt = sg_count;
0511 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
0512 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
0513 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
0514
0515 if (sg_count) {
0516 io_req->sgl_list =
0517 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
0518 GFP_ATOMIC);
0519 if (!io_req->sgl_list) {
0520 atomic64_inc(&fnic_stats->io_stats.alloc_failures);
0521 ret = SCSI_MLQUEUE_HOST_BUSY;
0522 scsi_dma_unmap(sc);
0523 mempool_free(io_req, fnic->io_req_pool);
0524 goto out;
0525 }
0526
0527
0528 io_req->sgl_list_alloc = io_req->sgl_list;
0529 ptr = (unsigned long) io_req->sgl_list;
0530 if (ptr % FNIC_SG_DESC_ALIGN) {
0531 io_req->sgl_list = (struct host_sg_desc *)
0532 (((unsigned long) ptr
0533 + FNIC_SG_DESC_ALIGN - 1)
0534 & ~(FNIC_SG_DESC_ALIGN - 1));
0535 }
0536 }
0537
0538
0539
0540
0541
0542 io_lock = fnic_io_lock_hash(fnic, sc);
0543 spin_lock_irqsave(io_lock, flags);
0544
0545
0546 io_lock_acquired = 1;
0547 io_req->port_id = rport->port_id;
0548 io_req->start_time = jiffies;
0549 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
0550 fnic_priv(sc)->io_req = io_req;
0551 fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
0552
0553
0554 wq = &fnic->wq_copy[0];
0555 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
0556 if (ret) {
0557
0558
0559
0560
0561 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
0562 tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
0563 io_req = fnic_priv(sc)->io_req;
0564 fnic_priv(sc)->io_req = NULL;
0565 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
0566 spin_unlock_irqrestore(io_lock, flags);
0567 if (io_req) {
0568 fnic_release_ioreq_buf(fnic, io_req, sc);
0569 mempool_free(io_req, fnic->io_req_pool);
0570 }
0571 atomic_dec(&fnic->in_flight);
0572
0573 spin_lock(lp->host->host_lock);
0574 return ret;
0575 } else {
0576 atomic64_inc(&fnic_stats->io_stats.active_ios);
0577 atomic64_inc(&fnic_stats->io_stats.num_ios);
0578 if (atomic64_read(&fnic_stats->io_stats.active_ios) >
0579 atomic64_read(&fnic_stats->io_stats.max_active_ios))
0580 atomic64_set(&fnic_stats->io_stats.max_active_ios,
0581 atomic64_read(&fnic_stats->io_stats.active_ios));
0582
0583
0584 fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
0585 }
0586 out:
0587 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
0588 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
0589 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
0590 sc->cmnd[5]);
0591
0592 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
0593 tag, sc, io_req, sg_count, cmd_trace,
0594 fnic_flags_and_state(sc));
0595
0596
0597 if (io_lock_acquired)
0598 spin_unlock_irqrestore(io_lock, flags);
0599
0600 atomic_dec(&fnic->in_flight);
0601
0602 spin_lock(lp->host->host_lock);
0603 return ret;
0604 }
0605
0606 DEF_SCSI_QCMD(fnic_queuecommand)
0607
0608
0609
0610
0611
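/*
 * fnic_fcpio_fw_reset_cmpl_handler() - handle firmware reset completion.
 * All outstanding I/Os are cleaned up; on success the fnic moves from
 * FNIC_IN_FC_TRANS_ETH_MODE to FNIC_IN_ETH_MODE and queued frames are
 * flushed, and anyone waiting on remove_wait is woken.
 */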
0612 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
0613 struct fcpio_fw_req *desc)
0614 {
0615 u8 type;
0616 u8 hdr_status;
0617 struct fcpio_tag tag;
0618 int ret = 0;
0619 unsigned long flags;
0620 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
0621
0622 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
0623
0624 atomic64_inc(&reset_stats->fw_reset_completions);
0625
0626
0627 fnic_cleanup_io(fnic);
0628
0629 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
0630 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
0631 atomic64_set(&fnic->io_cmpl_skip, 0);
0632
0633 spin_lock_irqsave(&fnic->fnic_lock, flags);
0634
0635
0636 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
0637
0638 if (!hdr_status) {
0639 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0640 "reset cmpl success\n");
0641
0642 fnic->state = FNIC_IN_ETH_MODE;
0643 } else {
0644 FNIC_SCSI_DBG(KERN_DEBUG,
0645 fnic->lport->host,
0646 "fnic fw_reset : failed %s\n",
0647 fnic_fcpio_status_to_str(hdr_status));
0648
0649
0650
0651
0652
0653
0654
0655 fnic->state = FNIC_IN_FC_MODE;
0656 atomic64_inc(&reset_stats->fw_reset_failures);
0657 ret = -1;
0658 }
0659 } else {
0660 FNIC_SCSI_DBG(KERN_DEBUG,
0661 fnic->lport->host,
0662 "Unexpected state %s while processing"
0663 " reset cmpl\n", fnic_state_to_str(fnic->state));
0664 atomic64_inc(&reset_stats->fw_reset_failures);
0665 ret = -1;
0666 }
0667
0668
0669 if (fnic->remove_wait)
0670 complete(fnic->remove_wait);
0671
0672
0673
0674
0675
0676 if (fnic->remove_wait || ret) {
0677 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0678 skb_queue_purge(&fnic->tx_queue);
0679 goto reset_cmpl_handler_end;
0680 }
0681
0682 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0683
0684 fnic_flush_tx(fnic);
0685
0686 reset_cmpl_handler_end:
0687 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
0688
0689 return ret;
0690 }
0691
0692
0693
0694
0695
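/*
 * fnic_fcpio_flogi_reg_cmpl_handler() - handle FLOGI/FIP register
 * completion. On success the fnic moves from FNIC_IN_ETH_TRANS_FC_MODE
 * to FNIC_IN_FC_MODE and pending frames are flushed to the fabric.
 */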
0696 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
0697 struct fcpio_fw_req *desc)
0698 {
0699 u8 type;
0700 u8 hdr_status;
0701 struct fcpio_tag tag;
0702 int ret = 0;
0703 unsigned long flags;
0704
0705 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
0706
0707
0708 spin_lock_irqsave(&fnic->fnic_lock, flags);
0709
0710 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
0711
0712
0713 if (!hdr_status) {
0714 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0715 "flog reg succeeded\n");
0716 fnic->state = FNIC_IN_FC_MODE;
0717 } else {
0718 FNIC_SCSI_DBG(KERN_DEBUG,
0719 fnic->lport->host,
0720 "fnic flogi reg :failed %s\n",
0721 fnic_fcpio_status_to_str(hdr_status));
0722 fnic->state = FNIC_IN_ETH_MODE;
0723 ret = -1;
0724 }
0725 } else {
0726 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
0727 "Unexpected fnic state %s while"
0728 " processing flogi reg completion\n",
0729 fnic_state_to_str(fnic->state));
0730 ret = -1;
0731 }
0732
0733 if (!ret) {
0734 if (fnic->stop_rx_link_events) {
0735 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0736 goto reg_cmpl_handler_end;
0737 }
0738 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0739
0740 fnic_flush_tx(fnic);
0741 queue_work(fnic_event_queue, &fnic->frame_work);
0742 } else {
0743 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
0744 }
0745
0746 reg_cmpl_handler_end:
0747 return ret;
0748 }
0749
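/*
 * is_ack_index_in_range() - sanity-check a firmware ACK index: it must
 * refer to a descriptor that is currently outstanding, i.e. lie between
 * to_clean_index and to_use_index, accounting for ring wrap-around.
 */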
0750 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
0751 u16 request_out)
0752 {
0753 if (wq->to_clean_index <= wq->to_use_index) {
0754
0755 if (request_out < wq->to_clean_index ||
0756 request_out >= wq->to_use_index)
0757 return 0;
0758 } else {
0759
0760 if (request_out < wq->to_clean_index &&
0761 request_out >= wq->to_use_index)
0762 return 0;
0763 }
0764
0765 return 1;
0766 }
0767
0768
0769
0770
0771
0772
0773
0774
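/*
 * fnic_fcpio_ack_handler() - handle a descriptor ACK from the firmware.
 * After validating the index, record it so that free_wq_copy_descs()
 * can reclaim copy WQ entries on a later submission.
 */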
0775 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
0776 unsigned int cq_index,
0777 struct fcpio_fw_req *desc)
0778 {
0779 struct vnic_wq_copy *wq;
0780 u16 request_out = desc->u.ack.request_out;
0781 unsigned long flags;
0782 u64 *ox_id_tag = (u64 *)(void *)desc;
0783
0784
0785 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
0786 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
0787
0788 fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
0789 if (is_ack_index_in_range(wq, request_out)) {
0790 fnic->fw_ack_index[0] = request_out;
0791 fnic->fw_ack_recd[0] = 1;
0792 } else
0793 atomic64_inc(
0794 &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
0795
0796 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
0797 FNIC_TRACE(fnic_fcpio_ack_handler,
0798 fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
0799 ox_id_tag[4], ox_id_tag[5]);
0800 }
0801
0802
0803
0804
0805
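/*
 * fnic_fcpio_icmnd_cmpl_handler() - handle a SCSI command completion.
 * Translate the fcpio header status into a SCSI result, set the
 * residual, release the I/O resources and complete the command to the
 * midlayer. If an abort is pending for the command, completion is left
 * to the abort path.
 */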
0806 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
0807 struct fcpio_fw_req *desc)
0808 {
0809 u8 type;
0810 u8 hdr_status;
0811 struct fcpio_tag tag;
0812 u32 id;
0813 u64 xfer_len = 0;
0814 struct fcpio_icmnd_cmpl *icmnd_cmpl;
0815 struct fnic_io_req *io_req;
0816 struct scsi_cmnd *sc;
0817 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
0818 unsigned long flags;
0819 spinlock_t *io_lock;
0820 u64 cmd_trace;
0821 unsigned long start_time;
0822 unsigned long io_duration_time;
0823
0824
0825 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
0826 fcpio_tag_id_dec(&tag, &id);
0827 icmnd_cmpl = &desc->u.icmnd_cmpl;
0828
0829 if (id >= fnic->fnic_max_tag_id) {
0830 shost_printk(KERN_ERR, fnic->lport->host,
0831 "Tag out of range tag %x hdr status = %s\n",
0832 id, fnic_fcpio_status_to_str(hdr_status));
0833 return;
0834 }
0835
0836 sc = scsi_host_find_tag(fnic->lport->host, id);
0837 WARN_ON_ONCE(!sc);
0838 if (!sc) {
0839 atomic64_inc(&fnic_stats->io_stats.sc_null);
0840 shost_printk(KERN_ERR, fnic->lport->host,
0841 "icmnd_cmpl sc is null - "
0842 "hdr status = %s tag = 0x%x desc = 0x%p\n",
0843 fnic_fcpio_status_to_str(hdr_status), id, desc);
0844 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
0845 fnic->lport->host->host_no, id,
0846 ((u64)icmnd_cmpl->_resvd0[1] << 16 |
0847 (u64)icmnd_cmpl->_resvd0[0]),
0848 ((u64)hdr_status << 16 |
0849 (u64)icmnd_cmpl->scsi_status << 8 |
0850 (u64)icmnd_cmpl->flags), desc,
0851 (u64)icmnd_cmpl->residual, 0);
0852 return;
0853 }
0854
0855 io_lock = fnic_io_lock_hash(fnic, sc);
0856 spin_lock_irqsave(io_lock, flags);
0857 io_req = fnic_priv(sc)->io_req;
0858 WARN_ON_ONCE(!io_req);
0859 if (!io_req) {
0860 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
0861 fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
0862 spin_unlock_irqrestore(io_lock, flags);
0863 shost_printk(KERN_ERR, fnic->lport->host,
0864 "icmnd_cmpl io_req is null - "
0865 "hdr status = %s tag = 0x%x sc 0x%p\n",
0866 fnic_fcpio_status_to_str(hdr_status), id, sc);
0867 return;
0868 }
0869 start_time = io_req->start_time;
0870
0871
0872 io_req->io_completed = 1;
0873
0874
0875
0876
0877
0878 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
0879
0880
0881
0882
0883
0884 fnic_priv(sc)->flags |= FNIC_IO_DONE;
0885 fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
0886 spin_unlock_irqrestore(io_lock, flags);
0887 if (hdr_status == FCPIO_ABORTED)
0888 fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
0889
0890 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
0891 "icmnd_cmpl abts pending "
0892 "hdr status = %s tag = 0x%x sc = 0x%p "
0893 "scsi_status = %x residual = %d\n",
0894 fnic_fcpio_status_to_str(hdr_status),
0895 id, sc,
0896 icmnd_cmpl->scsi_status,
0897 icmnd_cmpl->residual);
0898 return;
0899 }
0900
0901
0902 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
0903
0904 icmnd_cmpl = &desc->u.icmnd_cmpl;
0905
0906 switch (hdr_status) {
0907 case FCPIO_SUCCESS:
0908 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
0909 xfer_len = scsi_bufflen(sc);
0910
0911 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
0912 xfer_len -= icmnd_cmpl->residual;
0913 scsi_set_resid(sc, icmnd_cmpl->residual);
0914 }
0915
0916 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
0917 atomic64_inc(&fnic_stats->misc_stats.check_condition);
0918
0919 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
0920 atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
0921 break;
0922
0923 case FCPIO_TIMEOUT:
0924 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
0925 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
0926 break;
0927
0928 case FCPIO_ABORTED:
0929 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
0930 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0931 break;
0932
0933 case FCPIO_DATA_CNT_MISMATCH:
0934 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
0935 scsi_set_resid(sc, icmnd_cmpl->residual);
0936 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0937 break;
0938
0939 case FCPIO_OUT_OF_RESOURCE:
0940 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
0941 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
0942 break;
0943
0944 case FCPIO_IO_NOT_FOUND:
0945 atomic64_inc(&fnic_stats->io_stats.io_not_found);
0946 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0947 break;
0948
0949 case FCPIO_SGL_INVALID:
0950 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
0951 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0952 break;
0953
0954 case FCPIO_FW_ERR:
0955 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
0956 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0957 break;
0958
0959 case FCPIO_MSS_INVALID:
0960 atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
0961 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0962 break;
0963
0964 case FCPIO_INVALID_HEADER:
0965 case FCPIO_INVALID_PARAM:
0966 case FCPIO_REQ_NOT_SUPPORTED:
0967 default:
0968 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
0969 break;
0970 }
0971
0972
0973 fnic_priv(sc)->io_req = NULL;
0974 fnic_priv(sc)->flags |= FNIC_IO_DONE;
0975
0976 if (hdr_status != FCPIO_SUCCESS) {
0977 atomic64_inc(&fnic_stats->io_stats.io_failures);
0978 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
0979 fnic_fcpio_status_to_str(hdr_status));
0980 }
0981
0982 fnic_release_ioreq_buf(fnic, io_req, sc);
0983
0984 cmd_trace = ((u64)hdr_status << 56) |
0985 (u64)icmnd_cmpl->scsi_status << 48 |
0986 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
0987 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
0988 (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
0989
0990 FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
0991 sc->device->host->host_no, id, sc,
0992 ((u64)icmnd_cmpl->_resvd0[1] << 56 |
0993 (u64)icmnd_cmpl->_resvd0[0] << 48 |
0994 jiffies_to_msecs(jiffies - start_time)),
0995 desc, cmd_trace, fnic_flags_and_state(sc));
0996
0997 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
0998 fnic->lport->host_stats.fcp_input_requests++;
0999 fnic->fcp_input_bytes += xfer_len;
1000 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1001 fnic->lport->host_stats.fcp_output_requests++;
1002 fnic->fcp_output_bytes += xfer_len;
1003 } else
1004 fnic->lport->host_stats.fcp_control_requests++;
1005
1006
1007 scsi_done(sc);
1008 spin_unlock_irqrestore(io_lock, flags);
1009
1010 mempool_free(io_req, fnic->io_req_pool);
1011
1012 atomic64_dec(&fnic_stats->io_stats.active_ios);
1013 if (atomic64_read(&fnic->io_cmpl_skip))
1014 atomic64_dec(&fnic->io_cmpl_skip);
1015 else
1016 atomic64_inc(&fnic_stats->io_stats.io_completions);
1017
1018
1019 io_duration_time = jiffies_to_msecs(jiffies) -
1020 jiffies_to_msecs(start_time);
1021
1022 if (io_duration_time <= 10)
1023 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1024 else if (io_duration_time <= 100)
1025 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1026 else if (io_duration_time <= 500)
1027 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1028 else if (io_duration_time <= 5000)
1029 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1030 else if (io_duration_time <= 10000)
1031 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1032 else if (io_duration_time <= 30000)
1033 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1034 else {
1035 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1036
1037 if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1038 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1039 }
1040 }
1041
1042
1043
1044
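/*
 * fnic_fcpio_itmf_cmpl_handler() - handle a task-management completion:
 * abort/terminate and device (LUN) reset completions. A waiting issuer
 * is woken through abts_done/dr_done when one exists; otherwise the
 * aborted command is completed here.
 */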
1045 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
1046 struct fcpio_fw_req *desc)
1047 {
1048 u8 type;
1049 u8 hdr_status;
1050 struct fcpio_tag tag;
1051 u32 id;
1052 struct scsi_cmnd *sc;
1053 struct fnic_io_req *io_req;
1054 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1055 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
1056 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1057 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1058 unsigned long flags;
1059 spinlock_t *io_lock;
1060 unsigned long start_time;
1061
1062 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
1063 fcpio_tag_id_dec(&tag, &id);
1064
1065 if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
1066 shost_printk(KERN_ERR, fnic->lport->host,
1067 "Tag out of range tag %x hdr status = %s\n",
1068 id, fnic_fcpio_status_to_str(hdr_status));
1069 return;
1070 }
1071
1072 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
1073 WARN_ON_ONCE(!sc);
1074 if (!sc) {
1075 atomic64_inc(&fnic_stats->io_stats.sc_null);
1076 shost_printk(KERN_ERR, fnic->lport->host,
1077 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
1078 fnic_fcpio_status_to_str(hdr_status), id);
1079 return;
1080 }
1081 io_lock = fnic_io_lock_hash(fnic, sc);
1082 spin_lock_irqsave(io_lock, flags);
1083 io_req = fnic_priv(sc)->io_req;
1084 WARN_ON_ONCE(!io_req);
1085 if (!io_req) {
1086 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1087 spin_unlock_irqrestore(io_lock, flags);
1088 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
1089 shost_printk(KERN_ERR, fnic->lport->host,
1090 "itmf_cmpl io_req is null - "
1091 "hdr status = %s tag = 0x%x sc 0x%p\n",
1092 fnic_fcpio_status_to_str(hdr_status), id, sc);
1093 return;
1094 }
1095 start_time = io_req->start_time;
1096
1097 if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1098
1099
1100 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1101 "dev reset abts cmpl recd. id %x status %s\n",
1102 id, fnic_fcpio_status_to_str(hdr_status));
1103 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
1104 fnic_priv(sc)->abts_status = hdr_status;
1105 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1106 if (io_req->abts_done)
1107 complete(io_req->abts_done);
1108 spin_unlock_irqrestore(io_lock, flags);
1109 } else if (id & FNIC_TAG_ABORT) {
1110
1111 switch (hdr_status) {
1112 case FCPIO_SUCCESS:
1113 break;
1114 case FCPIO_TIMEOUT:
1115 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1116 atomic64_inc(&abts_stats->abort_fw_timeouts);
1117 else
1118 atomic64_inc(
1119 &term_stats->terminate_fw_timeouts);
1120 break;
1121 case FCPIO_ITMF_REJECTED:
1122 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1123 "abort reject recd. id %d\n",
1124 (int)(id & FNIC_TAG_MASK));
1125 break;
1126 case FCPIO_IO_NOT_FOUND:
1127 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1128 atomic64_inc(&abts_stats->abort_io_not_found);
1129 else
1130 atomic64_inc(
1131 &term_stats->terminate_io_not_found);
1132 break;
1133 default:
1134 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1135 atomic64_inc(&abts_stats->abort_failures);
1136 else
1137 atomic64_inc(
1138 &term_stats->terminate_failures);
1139 break;
1140 }
1141 if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
1142
1143 spin_unlock_irqrestore(io_lock, flags);
1144 return;
1145 }
1146
1147 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
1148 fnic_priv(sc)->abts_status = hdr_status;
1149
1150
1151 if (hdr_status == FCPIO_IO_NOT_FOUND)
1152 fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
1153
1154 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1155 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1156
1157 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1158 "abts cmpl recd. id %d status %s\n",
1159 (int)(id & FNIC_TAG_MASK),
1160 fnic_fcpio_status_to_str(hdr_status));
1161
1162
1163
1164
1165
1166
1167 if (io_req->abts_done) {
1168 complete(io_req->abts_done);
1169 spin_unlock_irqrestore(io_lock, flags);
1170 } else {
1171 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1172 "abts cmpl, completing IO\n");
1173 fnic_priv(sc)->io_req = NULL;
1174 sc->result = (DID_ERROR << 16);
1175
1176 spin_unlock_irqrestore(io_lock, flags);
1177
1178 fnic_release_ioreq_buf(fnic, io_req, sc);
1179 mempool_free(io_req, fnic->io_req_pool);
1180 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1181 sc->device->host->host_no, id,
1182 sc,
1183 jiffies_to_msecs(jiffies - start_time),
1184 desc,
1185 (((u64)hdr_status << 40) |
1186 (u64)sc->cmnd[0] << 32 |
1187 (u64)sc->cmnd[2] << 24 |
1188 (u64)sc->cmnd[3] << 16 |
1189 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1190 fnic_flags_and_state(sc));
1191 scsi_done(sc);
1192 atomic64_dec(&fnic_stats->io_stats.active_ios);
1193 if (atomic64_read(&fnic->io_cmpl_skip))
1194 atomic64_dec(&fnic->io_cmpl_skip);
1195 else
1196 atomic64_inc(&fnic_stats->io_stats.io_completions);
1197 }
1198 } else if (id & FNIC_TAG_DEV_RST) {
1199
1200 fnic_priv(sc)->lr_status = hdr_status;
1201 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1202 spin_unlock_irqrestore(io_lock, flags);
1203 fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
1204 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1205 sc->device->host->host_no, id, sc,
1206 jiffies_to_msecs(jiffies - start_time),
1207 desc, 0, fnic_flags_and_state(sc));
1208 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1209 "Terminate pending "
1210 "dev reset cmpl recd. id %d status %s\n",
1211 (int)(id & FNIC_TAG_MASK),
1212 fnic_fcpio_status_to_str(hdr_status));
1213 return;
1214 }
1215 if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
1216
1217 spin_unlock_irqrestore(io_lock, flags);
1218 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1219 sc->device->host->host_no, id, sc,
1220 jiffies_to_msecs(jiffies - start_time),
1221 desc, 0, fnic_flags_and_state(sc));
1222 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1223 "dev reset cmpl recd after time out. "
1224 "id %d status %s\n",
1225 (int)(id & FNIC_TAG_MASK),
1226 fnic_fcpio_status_to_str(hdr_status));
1227 return;
1228 }
1229 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
1230 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1231 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1232 "dev reset cmpl recd. id %d status %s\n",
1233 (int)(id & FNIC_TAG_MASK),
1234 fnic_fcpio_status_to_str(hdr_status));
1235 if (io_req->dr_done)
1236 complete(io_req->dr_done);
1237 spin_unlock_irqrestore(io_lock, flags);
1238
1239 } else {
1240 shost_printk(KERN_ERR, fnic->lport->host,
1241 "Unexpected itmf io state %s tag %x\n",
1242 fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
1243 spin_unlock_irqrestore(io_lock, flags);
1244 }
1245
1246 }
1247
1248
1249
1250
1251
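/*
 * fnic_fcpio_cmpl_handler() - per-descriptor callback for the copy WQ
 * completion queue: update the active firmware request count and
 * dispatch the completion to the handler for its type.
 */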
1252 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1253 unsigned int cq_index,
1254 struct fcpio_fw_req *desc)
1255 {
1256 struct fnic *fnic = vnic_dev_priv(vdev);
1257
1258 switch (desc->hdr.type) {
1259 case FCPIO_ICMND_CMPL:
1260 case FCPIO_ITMF_CMPL:
1261 case FCPIO_FLOGI_REG_CMPL:
1262 case FCPIO_FLOGI_FIP_REG_CMPL:
1263 case FCPIO_RESET_CMPL:
1264 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1265 break;
1266 default:
1267 break;
1268 }
1269
1270 switch (desc->hdr.type) {
1271 case FCPIO_ACK:
1272 fnic_fcpio_ack_handler(fnic, cq_index, desc);
1273 break;
1274
1275 case FCPIO_ICMND_CMPL:
1276 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
1277 break;
1278
1279 case FCPIO_ITMF_CMPL:
1280 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
1281 break;
1282
1283 case FCPIO_FLOGI_REG_CMPL:
1284 case FCPIO_FLOGI_FIP_REG_CMPL:
1285 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1286 break;
1287
1288 case FCPIO_RESET_CMPL:
1289 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1290 break;
1291
1292 default:
1293 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1294 "firmware completion type %d\n",
1295 desc->hdr.type);
1296 break;
1297 }
1298
1299 return 0;
1300 }
1301
1302
1303
1304
1305
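/*
 * fnic_wq_copy_cmpl_handler() - service the copy WQ completion queues,
 * up to copy_work_to_do entries per queue, and record the longest single
 * service pass in the ISR statistics. Returns the number of completions
 * processed.
 */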
1306 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
1307 {
1308 unsigned int wq_work_done = 0;
1309 unsigned int i, cq_index;
1310 unsigned int cur_work_done;
1311 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1312 u64 start_jiffies = 0;
1313 u64 end_jiffies = 0;
1314 u64 delta_jiffies = 0;
1315 u64 delta_ms = 0;
1316
1317 for (i = 0; i < fnic->wq_copy_count; i++) {
1318 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
1319
1320 start_jiffies = jiffies;
1321 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1322 fnic_fcpio_cmpl_handler,
1323 copy_work_to_do);
1324 end_jiffies = jiffies;
1325
1326 wq_work_done += cur_work_done;
1327 delta_jiffies = end_jiffies - start_jiffies;
1328 if (delta_jiffies >
1329 (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1330 atomic64_set(&misc_stats->max_isr_jiffies,
1331 delta_jiffies);
1332 delta_ms = jiffies_to_msecs(delta_jiffies);
1333 atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1334 atomic64_set(&misc_stats->corr_work_done,
1335 cur_work_done);
1336 }
1337 }
1338 return wq_work_done;
1339 }
1340
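/*
 * fnic_cleanup_io_iter() - per-command iterator used after a firmware
 * reset: release the resources of each outstanding I/O and complete it
 * with DID_TRANSPORT_DISRUPTED. Commands with a pending device reset are
 * woken up instead of being completed here.
 */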
1341 static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
1342 {
1343 const int tag = scsi_cmd_to_rq(sc)->tag;
1344 struct fnic *fnic = data;
1345 struct fnic_io_req *io_req;
1346 unsigned long flags = 0;
1347 spinlock_t *io_lock;
1348 unsigned long start_time = 0;
1349 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1350
1351 io_lock = fnic_io_lock_tag(fnic, tag);
1352 spin_lock_irqsave(io_lock, flags);
1353
1354 io_req = fnic_priv(sc)->io_req;
1355 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
1356 !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
1357
1358
1359
1360
1361 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1362 if (io_req && io_req->dr_done)
1363 complete(io_req->dr_done);
1364 else if (io_req && io_req->abts_done)
1365 complete(io_req->abts_done);
1366 spin_unlock_irqrestore(io_lock, flags);
1367 return true;
1368 } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
1369 spin_unlock_irqrestore(io_lock, flags);
1370 return true;
1371 }
1372 if (!io_req) {
1373 spin_unlock_irqrestore(io_lock, flags);
1374 goto cleanup_scsi_cmd;
1375 }
1376
1377 fnic_priv(sc)->io_req = NULL;
1378
1379 spin_unlock_irqrestore(io_lock, flags);
1380
1381
1382
1383
1384
1385 start_time = io_req->start_time;
1386 fnic_release_ioreq_buf(fnic, io_req, sc);
1387 mempool_free(io_req, fnic->io_req_pool);
1388
1389 cleanup_scsi_cmd:
1390 sc->result = DID_TRANSPORT_DISRUPTED << 16;
1391 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1392 "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
1393 tag, sc, jiffies - start_time);
1394
1395 if (atomic64_read(&fnic->io_cmpl_skip))
1396 atomic64_dec(&fnic->io_cmpl_skip);
1397 else
1398 atomic64_inc(&fnic_stats->io_stats.io_completions);
1399
1400
1401 if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
1402 shost_printk(KERN_ERR, fnic->lport->host,
1403 "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
1404 tag, sc);
1405
1406 FNIC_TRACE(fnic_cleanup_io,
1407 sc->device->host->host_no, tag, sc,
1408 jiffies_to_msecs(jiffies - start_time),
1409 0, ((u64)sc->cmnd[0] << 32 |
1410 (u64)sc->cmnd[2] << 24 |
1411 (u64)sc->cmnd[3] << 16 |
1412 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1413 fnic_flags_and_state(sc));
1414
1415 scsi_done(sc);
1416
1417 return true;
1418 }
1419
1420 static void fnic_cleanup_io(struct fnic *fnic)
1421 {
1422 scsi_host_busy_iter(fnic->lport->host,
1423 fnic_cleanup_io_iter, fnic);
1424 }
1425
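/*
 * fnic_wq_copy_cleanup_handler() - callback for a copy WQ descriptor that
 * is being discarded (e.g. during queue cleanup): complete the associated
 * command, if any, with DID_NO_CONNECT.
 */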
1426 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1427 struct fcpio_host_req *desc)
1428 {
1429 u32 id;
1430 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1431 struct fnic_io_req *io_req;
1432 struct scsi_cmnd *sc;
1433 unsigned long flags;
1434 spinlock_t *io_lock;
1435 unsigned long start_time = 0;
1436
1437
1438 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1439 id &= FNIC_TAG_MASK;
1440
1441 if (id >= fnic->fnic_max_tag_id)
1442 return;
1443
1444 sc = scsi_host_find_tag(fnic->lport->host, id);
1445 if (!sc)
1446 return;
1447
1448 io_lock = fnic_io_lock_hash(fnic, sc);
1449 spin_lock_irqsave(io_lock, flags);
1450
1451
1452 io_req = fnic_priv(sc)->io_req;
1453
1454
1455
1456 if (!io_req) {
1457 spin_unlock_irqrestore(io_lock, flags);
1458 goto wq_copy_cleanup_scsi_cmd;
1459 }
1460
1461 fnic_priv(sc)->io_req = NULL;
1462
1463 spin_unlock_irqrestore(io_lock, flags);
1464
1465 start_time = io_req->start_time;
1466 fnic_release_ioreq_buf(fnic, io_req, sc);
1467 mempool_free(io_req, fnic->io_req_pool);
1468
1469 wq_copy_cleanup_scsi_cmd:
1470 sc->result = DID_NO_CONNECT << 16;
1471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1472 " DID_NO_CONNECT\n");
1473
1474 FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1475 sc->device->host->host_no, id, sc,
1476 jiffies_to_msecs(jiffies - start_time),
1477 0, ((u64)sc->cmnd[0] << 32 |
1478 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1479 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1480 fnic_flags_and_state(sc));
1481
1482 scsi_done(sc);
1483 }
1484
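/*
 * fnic_queue_abort_io_req() - post an abort/terminate task (ITMF)
 * descriptor for the given tag. Returns 0 on success, 1 if I/O is
 * blocked or no copy WQ descriptor is available.
 */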
1485 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1486 u32 task_req, u8 *fc_lun,
1487 struct fnic_io_req *io_req)
1488 {
1489 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1490 struct Scsi_Host *host = fnic->lport->host;
1491 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1492 unsigned long flags;
1493
1494 spin_lock_irqsave(host->host_lock, flags);
1495 if (unlikely(fnic_chk_state_flags_locked(fnic,
1496 FNIC_FLAGS_IO_BLOCKED))) {
1497 spin_unlock_irqrestore(host->host_lock, flags);
1498 return 1;
1499 } else
1500 atomic_inc(&fnic->in_flight);
1501 spin_unlock_irqrestore(host->host_lock, flags);
1502
1503 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1504
1505 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1506 free_wq_copy_descs(fnic, wq);
1507
1508 if (!vnic_wq_copy_desc_avail(wq)) {
1509 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1510 atomic_dec(&fnic->in_flight);
1511 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1512 "fnic_queue_abort_io_req: failure: no descriptors\n");
1513 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1514 return 1;
1515 }
1516 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1517 0, task_req, tag, fc_lun, io_req->port_id,
1518 fnic->config.ra_tov, fnic->config.ed_tov);
1519
1520 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1521 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1522 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1523 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1524 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1525
1526 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1527 atomic_dec(&fnic->in_flight);
1528
1529 return 0;
1530 }
1531
1532 struct fnic_rport_abort_io_iter_data {
1533 struct fnic *fnic;
1534 u32 port_id;
1535 int term_cnt;
1536 };
1537
1538 static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
1539 {
1540 struct fnic_rport_abort_io_iter_data *iter_data = data;
1541 struct fnic *fnic = iter_data->fnic;
1542 int abt_tag = scsi_cmd_to_rq(sc)->tag;
1543 struct fnic_io_req *io_req;
1544 spinlock_t *io_lock;
1545 unsigned long flags;
1546 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1547 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1548 struct scsi_lun fc_lun;
1549 enum fnic_ioreq_state old_ioreq_state;
1550
1551 io_lock = fnic_io_lock_tag(fnic, abt_tag);
1552 spin_lock_irqsave(io_lock, flags);
1553
1554 io_req = fnic_priv(sc)->io_req;
1555
1556 if (!io_req || io_req->port_id != iter_data->port_id) {
1557 spin_unlock_irqrestore(io_lock, flags);
1558 return true;
1559 }
1560
1561 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
1562 !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
1563 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1564 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
1565 sc);
1566 spin_unlock_irqrestore(io_lock, flags);
1567 return true;
1568 }
1569
1570
1571
1572
1573
1574 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1575 spin_unlock_irqrestore(io_lock, flags);
1576 return true;
1577 }
1578 if (io_req->abts_done) {
1579 shost_printk(KERN_ERR, fnic->lport->host,
1580 "fnic_rport_exch_reset: io_req->abts_done is set "
1581 "state is %s\n",
1582 fnic_ioreq_state_to_str(fnic_priv(sc)->state));
1583 }
1584
1585 if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
1586 shost_printk(KERN_ERR, fnic->lport->host,
1587 "rport_exch_reset "
1588 "IO not yet issued %p tag 0x%x flags "
1589 "%x state %d\n",
1590 sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
1591 }
1592 old_ioreq_state = fnic_priv(sc)->state;
1593 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
1594 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
1595 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
1596 atomic64_inc(&reset_stats->device_reset_terminates);
1597 abt_tag |= FNIC_TAG_DEV_RST;
1598 }
1599 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1600 "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
1601 BUG_ON(io_req->abts_done);
1602
1603 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1604 "fnic_rport_reset_exch: Issuing abts\n");
1605
1606 spin_unlock_irqrestore(io_lock, flags);
1607
1608
1609 int_to_scsilun(sc->device->lun, &fc_lun);
1610
1611 if (fnic_queue_abort_io_req(fnic, abt_tag,
1612 FCPIO_ITMF_ABT_TASK_TERM,
1613 fc_lun.scsi_lun, io_req)) {
1614
1615
1616
1617
1618
1619
1620 spin_lock_irqsave(io_lock, flags);
1621 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
1622 fnic_priv(sc)->state = old_ioreq_state;
1623 spin_unlock_irqrestore(io_lock, flags);
1624 } else {
1625 spin_lock_irqsave(io_lock, flags);
1626 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
1627 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
1628 else
1629 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
1630 spin_unlock_irqrestore(io_lock, flags);
1631 atomic64_inc(&term_stats->terminates);
1632 iter_data->term_cnt++;
1633 }
1634 return true;
1635 }
1636
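/*
 * fnic_rport_exch_reset() - terminate every outstanding I/O to a remote
 * port, typically when the rport is going away: an abort/terminate
 * request is queued to the firmware for each matching command.
 */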
1637 static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1638 {
1639 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1640 struct fnic_rport_abort_io_iter_data iter_data = {
1641 .fnic = fnic,
1642 .port_id = port_id,
1643 .term_cnt = 0,
1644 };
1645
1646 FNIC_SCSI_DBG(KERN_DEBUG,
1647 fnic->lport->host,
1648 "fnic_rport_exch_reset called portid 0x%06x\n",
1649 port_id);
1650
1651 if (fnic->in_remove)
1652 return;
1653
1654 scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
1655 &iter_data);
1656 if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
1657 atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
1658
1659 }
1660
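/*
 * fnic_terminate_rport_io() - fc_transport callback: terminate all
 * outstanding I/O to the given remote port.
 */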
1661 void fnic_terminate_rport_io(struct fc_rport *rport)
1662 {
1663 struct fc_rport_libfc_priv *rdata;
1664 struct fc_lport *lport;
1665 struct fnic *fnic;
1666
1667 if (!rport) {
1668 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
1669 return;
1670 }
1671 rdata = rport->dd_data;
1672
1673 if (!rdata) {
1674 printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
1675 return;
1676 }
1677 lport = rdata->local_port;
1678
1679 if (!lport) {
1680 printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
1681 return;
1682 }
1683 fnic = lport_priv(lport);
1684 FNIC_SCSI_DBG(KERN_DEBUG,
1685 fnic->lport->host, "fnic_terminate_rport_io called"
1686 " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
1687 rport->port_name, rport->node_name, rport,
1688 rport->port_id);
1689
1690 if (fnic->in_remove)
1691 return;
1692
1693 fnic_rport_exch_reset(fnic, rport->port_id);
1694 }
1695
1696
1697
1698
1699
1700
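/*
 * fnic_abort_cmd() - SCSI error-handler abort entry point. Issues an
 * abort task (or a terminate, if the rport is not ready) to the firmware
 * and waits up to 2 * RA_TOV + ED_TOV for its completion. Returns SUCCESS
 * only if the firmware confirms the abort.
 */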
1701 int fnic_abort_cmd(struct scsi_cmnd *sc)
1702 {
1703 struct request *const rq = scsi_cmd_to_rq(sc);
1704 struct fc_lport *lp;
1705 struct fnic *fnic;
1706 struct fnic_io_req *io_req = NULL;
1707 struct fc_rport *rport;
1708 spinlock_t *io_lock;
1709 unsigned long flags;
1710 unsigned long start_time = 0;
1711 int ret = SUCCESS;
1712 u32 task_req = 0;
1713 struct scsi_lun fc_lun;
1714 struct fnic_stats *fnic_stats;
1715 struct abort_stats *abts_stats;
1716 struct terminate_stats *term_stats;
1717 enum fnic_ioreq_state old_ioreq_state;
1718 const int tag = rq->tag;
1719 unsigned long abt_issued_time;
1720 DECLARE_COMPLETION_ONSTACK(tm_done);
1721
1722
1723 fc_block_scsi_eh(sc);
1724
1725
1726 lp = shost_priv(sc->device->host);
1727
1728 fnic = lport_priv(lp);
1729 fnic_stats = &fnic->fnic_stats;
1730 abts_stats = &fnic->fnic_stats.abts_stats;
1731 term_stats = &fnic->fnic_stats.term_stats;
1732
1733 rport = starget_to_rport(scsi_target(sc->device));
1734 FNIC_SCSI_DBG(KERN_DEBUG,
1735 fnic->lport->host,
1736 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
1737 rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
1738
1739 fnic_priv(sc)->flags = FNIC_NO_FLAGS;
1740
1741 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1742 ret = FAILED;
1743 goto fnic_abort_cmd_end;
1744 }
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758 io_lock = fnic_io_lock_hash(fnic, sc);
1759 spin_lock_irqsave(io_lock, flags);
1760 io_req = fnic_priv(sc)->io_req;
1761 if (!io_req) {
1762 spin_unlock_irqrestore(io_lock, flags);
1763 goto fnic_abort_cmd_end;
1764 }
1765
1766 io_req->abts_done = &tm_done;
1767
1768 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1769 spin_unlock_irqrestore(io_lock, flags);
1770 goto wait_pending;
1771 }
1772
1773 abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
1774 if (abt_issued_time <= 6000)
1775 atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
1776 else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
1777 atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
1778 else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
1779 atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
1780 else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
1781 atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
1782 else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
1783 atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
1784 else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
1785 atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
1786 else
1787 atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
1788
1789 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
1790 "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
1791
1792
1793
1794
1795
1796
1797 old_ioreq_state = fnic_priv(sc)->state;
1798 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
1799 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
1800
1801 spin_unlock_irqrestore(io_lock, flags);
1802
1803
1804
1805
1806
1807
1808 if (fc_remote_port_chkready(rport) == 0)
1809 task_req = FCPIO_ITMF_ABT_TASK;
1810 else {
1811 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
1812 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1813 }
1814
1815
1816 int_to_scsilun(sc->device->lun, &fc_lun);
1817
1818 if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
1819 io_req)) {
1820 spin_lock_irqsave(io_lock, flags);
1821 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
1822 fnic_priv(sc)->state = old_ioreq_state;
1823 io_req = fnic_priv(sc)->io_req;
1824 if (io_req)
1825 io_req->abts_done = NULL;
1826 spin_unlock_irqrestore(io_lock, flags);
1827 ret = FAILED;
1828 goto fnic_abort_cmd_end;
1829 }
1830 if (task_req == FCPIO_ITMF_ABT_TASK) {
1831 fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
1832 atomic64_inc(&fnic_stats->abts_stats.aborts);
1833 } else {
1834 fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
1835 atomic64_inc(&fnic_stats->term_stats.terminates);
1836 }
1837
1838
1839
1840
1841
1842
1843 wait_pending:
1844 wait_for_completion_timeout(&tm_done,
1845 msecs_to_jiffies
1846 (2 * fnic->config.ra_tov +
1847 fnic->config.ed_tov));
1848
1849
1850 spin_lock_irqsave(io_lock, flags);
1851
1852 io_req = fnic_priv(sc)->io_req;
1853 if (!io_req) {
1854 atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1855 spin_unlock_irqrestore(io_lock, flags);
1856 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
1857 ret = FAILED;
1858 goto fnic_abort_cmd_end;
1859 }
1860 io_req->abts_done = NULL;
1861
1862
1863 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
1864 spin_unlock_irqrestore(io_lock, flags);
1865 if (task_req == FCPIO_ITMF_ABT_TASK) {
1866 atomic64_inc(&abts_stats->abort_drv_timeouts);
1867 } else {
1868 atomic64_inc(&term_stats->terminate_drv_timeouts);
1869 }
1870 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
1871 ret = FAILED;
1872 goto fnic_abort_cmd_end;
1873 }
1874
1875
1876
1877 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
1878 spin_unlock_irqrestore(io_lock, flags);
1879 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1880 "Issuing Host reset due to out of order IO\n");
1881
1882 ret = FAILED;
1883 goto fnic_abort_cmd_end;
1884 }
1885
1886 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
1887
1888 start_time = io_req->start_time;
1889
1890
1891
1892
1893
1894 if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
1895 fnic_priv(sc)->io_req = NULL;
1896 } else {
1897 ret = FAILED;
1898 spin_unlock_irqrestore(io_lock, flags);
1899 goto fnic_abort_cmd_end;
1900 }
1901
1902 spin_unlock_irqrestore(io_lock, flags);
1903
1904 fnic_release_ioreq_buf(fnic, io_req, sc);
1905 mempool_free(io_req, fnic->io_req_pool);
1906
1907
1908 sc->result = DID_ABORT << 16;
1909 scsi_done(sc);
1910 atomic64_dec(&fnic_stats->io_stats.active_ios);
1911 if (atomic64_read(&fnic->io_cmpl_skip))
1912 atomic64_dec(&fnic->io_cmpl_skip);
1913 else
1914 atomic64_inc(&fnic_stats->io_stats.io_completions);
1915
1916 fnic_abort_cmd_end:
1917 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
1918 jiffies_to_msecs(jiffies - start_time),
1919 0, ((u64)sc->cmnd[0] << 32 |
1920 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1921 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1922 fnic_flags_and_state(sc));
1923
1924 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1925 "Returning from abort cmd type %x %s\n", task_req,
1926 (ret == SUCCESS) ?
1927 "SUCCESS" : "FAILED");
1928 return ret;
1929 }
1930
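/*
 * fnic_queue_dr_io_req() - post a LUN reset (ITMF) descriptor for the
 * device behind @sc. Returns 0 on success, or a failure code if I/O is
 * blocked or the copy WQ is full.
 */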
1931 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1932 struct scsi_cmnd *sc,
1933 struct fnic_io_req *io_req)
1934 {
1935 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1936 struct Scsi_Host *host = fnic->lport->host;
1937 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1938 struct scsi_lun fc_lun;
1939 int ret = 0;
1940 unsigned long intr_flags;
1941
1942 spin_lock_irqsave(host->host_lock, intr_flags);
1943 if (unlikely(fnic_chk_state_flags_locked(fnic,
1944 FNIC_FLAGS_IO_BLOCKED))) {
1945 spin_unlock_irqrestore(host->host_lock, intr_flags);
1946 return FAILED;
1947 } else
1948 atomic_inc(&fnic->in_flight);
1949 spin_unlock_irqrestore(host->host_lock, intr_flags);
1950
1951 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1952
1953 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1954 free_wq_copy_descs(fnic, wq);
1955
1956 if (!vnic_wq_copy_desc_avail(wq)) {
1957 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1958 "queue_dr_io_req failure - no descriptors\n");
1959 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
1960 ret = -EAGAIN;
1961 goto lr_io_req_end;
1962 }
1963
1964 /* fill in the LUN info */
1965 int_to_scsilun(sc->device->lun, &fc_lun);
1966
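/* Post the LUN RESET task-management request on copy WQ 0 */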
1967 fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
1968 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1969 fc_lun.scsi_lun, io_req->port_id,
1970 fnic->config.ra_tov, fnic->config.ed_tov);
1971
1972 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1973 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1974 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1975 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1976 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1977
1978 lr_io_req_end:
1979 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1980 atomic_dec(&fnic->in_flight);
1981
1982 return ret;
1983 }
1984
1985 struct fnic_pending_aborts_iter_data {
1986 struct fnic *fnic;
1987 struct scsi_cmnd *lr_sc;
1988 struct scsi_device *lun_dev;
1989 int ret;
1990 };
1991
1992 static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
1993 {
1994 struct fnic_pending_aborts_iter_data *iter_data = data;
1995 struct fnic *fnic = iter_data->fnic;
1996 struct scsi_device *lun_dev = iter_data->lun_dev;
1997 int abt_tag = scsi_cmd_to_rq(sc)->tag;
1998 struct fnic_io_req *io_req;
1999 spinlock_t *io_lock;
2000 unsigned long flags;
2001 struct scsi_lun fc_lun;
2002 DECLARE_COMPLETION_ONSTACK(tm_done);
2003 enum fnic_ioreq_state old_ioreq_state;
2004
2005 if (sc == iter_data->lr_sc || sc->device != lun_dev)
2006 return true;
2007
2008 io_lock = fnic_io_lock_tag(fnic, abt_tag);
2009 spin_lock_irqsave(io_lock, flags);
2010 io_req = fnic_priv(sc)->io_req;
2011 if (!io_req) {
2012 spin_unlock_irqrestore(io_lock, flags);
2013 return true;
2014 }
2015
2016 /*
2017 * Found an IO that is still pending with the firmware and
2018 * belongs to the LUN that is being reset.
2019 */
2020 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2021 "Found IO in %s on lun\n",
2022 fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2023
2024 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
2025 spin_unlock_irqrestore(io_lock, flags);
2026 return true;
2027 }
2028 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
2029 (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
2030 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2031 "%s dev rst not pending sc 0x%p\n", __func__,
2032 sc);
2033 spin_unlock_irqrestore(io_lock, flags);
2034 return true;
2035 }
2036
2037 if (io_req->abts_done)
2038 shost_printk(KERN_ERR, fnic->lport->host,
2039 "%s: io_req->abts_done is set state is %s\n",
2040 __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2041 old_ioreq_state = fnic_priv(sc)->state;
2042 /*
2043 * Any pending IO issued prior to the reset is expected to be
2044 * in the abts-pending state; if not, set
2045 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
2046 * When the IO is completed, the completion is handed over and
2047 * handled in this function.
2048 */
2049 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
2050
2051 BUG_ON(io_req->abts_done);
2052
2053 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
2054 abt_tag |= FNIC_TAG_DEV_RST;
2055 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2056 "%s: dev rst sc 0x%p\n", __func__, sc);
2057 }
2058
2059 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
2060 io_req->abts_done = &tm_done;
2061 spin_unlock_irqrestore(io_lock, flags);
2062
2063 /* Now queue the abort command to the firmware */
2064 int_to_scsilun(sc->device->lun, &fc_lun);
2065
2066 if (fnic_queue_abort_io_req(fnic, abt_tag,
2067 FCPIO_ITMF_ABT_TASK_TERM,
2068 fc_lun.scsi_lun, io_req)) {
2069 spin_lock_irqsave(io_lock, flags);
2070 io_req = fnic_priv(sc)->io_req;
2071 if (io_req)
2072 io_req->abts_done = NULL;
2073 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
2074 fnic_priv(sc)->state = old_ioreq_state;
2075 spin_unlock_irqrestore(io_lock, flags);
2076 iter_data->ret = FAILED;
2077 return false;
2078 } else {
2079 spin_lock_irqsave(io_lock, flags);
2080 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
2081 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
2082 spin_unlock_irqrestore(io_lock, flags);
2083 }
2084 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
2085
2086 wait_for_completion_timeout(&tm_done, msecs_to_jiffies
2087 (fnic->config.ed_tov));
2088
2089 /* Recheck the command state to see if it is now aborted */
2090 spin_lock_irqsave(io_lock, flags);
2091 io_req = fnic_priv(sc)->io_req;
2092 if (!io_req) {
2093 spin_unlock_irqrestore(io_lock, flags);
2094 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
2095 return true;
2096 }
2097
2098 io_req->abts_done = NULL;
2099
2100 /* if the abort is still pending with the firmware, fail */
2101 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
2102 spin_unlock_irqrestore(io_lock, flags);
2103 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
2104 iter_data->ret = FAILED;
2105 return false;
2106 }
2107 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
2108
2109 /* the original sc used for the LUN reset is handled by the device reset code */
2110 if (sc != iter_data->lr_sc)
2111 fnic_priv(sc)->io_req = NULL;
2112 spin_unlock_irqrestore(io_lock, flags);
2113
2114 /* the original sc used for the LUN reset is handled by the device reset code */
2115 if (sc != iter_data->lr_sc) {
2116 fnic_release_ioreq_buf(fnic, io_req, sc);
2117 mempool_free(io_req, fnic->io_req_pool);
2118 }
2119
2120 /*
2121 * Any IO returned during the reset must call scsi_done() to hand
2122 * the scsi_cmnd back to the upper layer.
2123 */
2124 /* Set the result so the upper SCSI layer retries the command */
2125 sc->result = DID_RESET << 16;
2126 scsi_done(sc);
2127
2128 return true;
2129 }
2130
2131 /*
2132 * Clean up any pending aborts on the LUN.
2133 * For each outstanding IO on this LUN whose abort is not completed by
2134 * the firmware, issue a host-internal abort and wait for it to finish.
2135 * Return SUCCESS if all commands were aborted, FAILED otherwise.
2136 */
2137 static int fnic_clean_pending_aborts(struct fnic *fnic,
2138 struct scsi_cmnd *lr_sc,
2139 bool new_sc)
2140
2141 {
2142 int ret = SUCCESS;
2143 struct fnic_pending_aborts_iter_data iter_data = {
2144 .fnic = fnic,
2145 .lun_dev = lr_sc->device,
2146 .ret = SUCCESS,
2147 };
2148
2149 if (new_sc)
2150 iter_data.lr_sc = lr_sc;
2151
2152 scsi_host_busy_iter(fnic->lport->host,
2153 fnic_pending_aborts_iter, &iter_data);
2154 if (iter_data.ret == FAILED) {
2155 ret = iter_data.ret;
2156 goto clean_pending_aborts_end;
2157 }
2158 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2159
2160 /* walk the IOs again to check if any are still pending in the firmware */
2161 if (fnic_is_abts_pending(fnic, lr_sc))
2162 ret = FAILED;
2163
2164 clean_pending_aborts_end:
2165 return ret;
2166 }
2167
2168 /*
2169 * fnic_scsi_host_start_tag
2170 * Allocates a tag for the device reset command when it has no block-layer tag.
2171 */
2172 static inline int
2173 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2174 {
2175 struct request *rq = scsi_cmd_to_rq(sc);
2176 struct request_queue *q = rq->q;
2177 struct request *dummy;
2178
2179 dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
2180 if (IS_ERR(dummy))
2181 return SCSI_NO_TAG;
2182
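/* Borrow the dummy request's tag and remember the request so fnic_scsi_host_end_tag() can free it */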
2183 rq->tag = dummy->tag;
2184 sc->host_scribble = (unsigned char *)dummy;
2185
2186 return dummy->tag;
2187 }
2188
2189 /*
2190 * fnic_scsi_host_end_tag
2191 * Frees the dummy request (and its tag) allocated by fnic_scsi_host_start_tag.
2192 */
2193 static inline void
2194 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
2195 {
2196 struct request *dummy = (struct request *)sc->host_scribble;
2197
2198 blk_mq_free_request(dummy);
2199 }
2200
2201 /*
2202 * SCSI error handling: the eh thread issues a LUN reset when one or more
2203 * commands on a LUN fail to be aborted.  It calls the driver's
2204 * eh_device_reset handler with a SCSI command for that LUN.
2205 */
2206 int fnic_device_reset(struct scsi_cmnd *sc)
2207 {
2208 struct request *rq = scsi_cmd_to_rq(sc);
2209 struct fc_lport *lp;
2210 struct fnic *fnic;
2211 struct fnic_io_req *io_req = NULL;
2212 struct fc_rport *rport;
2213 int status;
2214 int ret = FAILED;
2215 spinlock_t *io_lock;
2216 unsigned long flags;
2217 unsigned long start_time = 0;
2218 struct scsi_lun fc_lun;
2219 struct fnic_stats *fnic_stats;
2220 struct reset_stats *reset_stats;
2221 int tag = rq->tag;
2222 DECLARE_COMPLETION_ONSTACK(tm_done);
2223 int tag_gen_flag = 0;
2224 bool new_sc = false;
2225
2226 /* Wait for the rport to unblock */
2227 fc_block_scsi_eh(sc);
2228
2229 /* Get the local port, check that it is ready and the link is up */
2230 lp = shost_priv(sc->device->host);
2231
2232 fnic = lport_priv(lp);
2233 fnic_stats = &fnic->fnic_stats;
2234 reset_stats = &fnic->fnic_stats.reset_stats;
2235
2236 atomic64_inc(&reset_stats->device_resets);
2237
2238 rport = starget_to_rport(scsi_target(sc->device));
2239 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2240 "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
2241 rport->port_id, sc->device->lun, sc);
2242
2243 if (lp->state != LPORT_ST_READY || !(lp->link_up))
2244 goto fnic_device_reset_end;
2245
2246 /* Check if the remote port is up */
2247 if (fc_remote_port_chkready(rport)) {
2248 atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
2249 goto fnic_device_reset_end;
2250 }
2251
2252 fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
2253 /* Allocate a tag if the command does not already have one */
2254
2255 if (unlikely(tag < 0)) {
2256 /*
2257 * Really should fix the midlayer to pass in a proper
2258 * request for ioctls...
2259 */
2260 tag = fnic_scsi_host_start_tag(fnic, sc);
2261 if (unlikely(tag == SCSI_NO_TAG))
2262 goto fnic_device_reset_end;
2263 tag_gen_flag = 1;
2264 new_sc = 1;
2265 }
2266 io_lock = fnic_io_lock_hash(fnic, sc);
2267 spin_lock_irqsave(io_lock, flags);
2268 io_req = fnic_priv(sc)->io_req;
2269
2270 /*
2271 * If there is an io_req attached to this command, use it;
2272 * otherwise allocate a new one.
2273 */
2274 if (!io_req) {
2275 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2276 if (!io_req) {
2277 spin_unlock_irqrestore(io_lock, flags);
2278 goto fnic_device_reset_end;
2279 }
2280 memset(io_req, 0, sizeof(*io_req));
2281 io_req->port_id = rport->port_id;
2282 fnic_priv(sc)->io_req = io_req;
2283 }
2284 io_req->dr_done = &tm_done;
2285 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
2286 fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
2287 spin_unlock_irqrestore(io_lock, flags);
2288
2289 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
2290
2291 /*
2292 * Issue the device reset; if enqueue fails, clean up the io_req
2293 * and break its association with the scsi command.
2294 */
2295 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2296 spin_lock_irqsave(io_lock, flags);
2297 io_req = fnic_priv(sc)->io_req;
2298 if (io_req)
2299 io_req->dr_done = NULL;
2300 goto fnic_device_reset_clean;
2301 }
2302 spin_lock_irqsave(io_lock, flags);
2303 fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
2304 spin_unlock_irqrestore(io_lock, flags);
2305
2306 /*
2307 * Wait on the local completion for the LUN reset.  The io_req may be
2308 * freed while we wait since no lock is held here.
2309 */
2310 wait_for_completion_timeout(&tm_done,
2311 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2312
2313 spin_lock_irqsave(io_lock, flags);
2314 io_req = fnic_priv(sc)->io_req;
2315 if (!io_req) {
2316 spin_unlock_irqrestore(io_lock, flags);
2317 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2318 "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
2319 goto fnic_device_reset_end;
2320 }
2321 io_req->dr_done = NULL;
2322
2323 status = fnic_priv(sc)->lr_status;
2324
2325 /*
2326 * If the LUN reset did not complete, bail out with FAILED; scsi_eh
2327 * can escalate, and the late response from the firmware is cleaned up.
2328 */
2329 if (status == FCPIO_INVALID_CODE) {
2330 atomic64_inc(&reset_stats->device_reset_timeouts);
2331 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2332 "Device reset timed out\n");
2333 fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
2334 spin_unlock_irqrestore(io_lock, flags);
2335 int_to_scsilun(sc->device->lun, &fc_lun);
2336 /*
2337 * Issue an abort/terminate for the device reset request.
2338 * If queueing the terminate fails, retry it after a delay.
2339 */
2340 while (1) {
2341 spin_lock_irqsave(io_lock, flags);
2342 if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
2343 spin_unlock_irqrestore(io_lock, flags);
2344 break;
2345 }
2346 spin_unlock_irqrestore(io_lock, flags);
2347 if (fnic_queue_abort_io_req(fnic,
2348 tag | FNIC_TAG_DEV_RST,
2349 FCPIO_ITMF_ABT_TASK_TERM,
2350 fc_lun.scsi_lun, io_req)) {
2351 wait_for_completion_timeout(&tm_done,
2352 msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
2353 } else {
2354 spin_lock_irqsave(io_lock, flags);
2355 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
2356 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
2357 io_req->abts_done = &tm_done;
2358 spin_unlock_irqrestore(io_lock, flags);
2359 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2360 "Abort and terminate issued on Device reset "
2361 "tag 0x%x sc 0x%p\n", tag, sc);
2362 break;
2363 }
2364 }
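/* If the reset has not completed yet, wait for the terminate; otherwise clean up the io_req */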
2365 while (1) {
2366 spin_lock_irqsave(io_lock, flags);
2367 if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
2368 spin_unlock_irqrestore(io_lock, flags);
2369 wait_for_completion_timeout(&tm_done,
2370 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2371 break;
2372 } else {
2373 io_req = fnic_priv(sc)->io_req;
2374 io_req->abts_done = NULL;
2375 goto fnic_device_reset_clean;
2376 }
2377 }
2378 } else {
2379 spin_unlock_irqrestore(io_lock, flags);
2380 }
2381
2382 /* Completed but not successful: clean up the io_req and return FAILED */
2383 if (status != FCPIO_SUCCESS) {
2384 spin_lock_irqsave(io_lock, flags);
2385 FNIC_SCSI_DBG(KERN_DEBUG,
2386 fnic->lport->host,
2387 "Device reset completed - failed\n");
2388 io_req = fnic_priv(sc)->io_req;
2389 goto fnic_device_reset_clean;
2390 }
2391
2392 /*
2393 * Clean up any aborts on this LUN that have still not completed.
2394 * If any of these fail, the LUN reset fails.
2395 * fnic_clean_pending_aborts() cleans all commands on this LUN except
2396 * the LUN reset command itself; if all of them are cleaned, the
2397 * LUN reset succeeds.
2398 */
2399 if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2400 spin_lock_irqsave(io_lock, flags);
2401 io_req = fnic_priv(sc)->io_req;
2402 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2403 "Device reset failed"
2404 " since could not abort all IOs\n");
2405 goto fnic_device_reset_clean;
2406 }
2407
2408 /* Clean up the LUN reset command */
2409 spin_lock_irqsave(io_lock, flags);
2410 io_req = fnic_priv(sc)->io_req;
2411 if (io_req)
2412 /* Completed, and successful */
2413 ret = SUCCESS;
2414
2415 fnic_device_reset_clean:
2416 if (io_req)
2417 fnic_priv(sc)->io_req = NULL;
2418
2419 spin_unlock_irqrestore(io_lock, flags);
2420
2421 if (io_req) {
2422 start_time = io_req->start_time;
2423 fnic_release_ioreq_buf(fnic, io_req, sc);
2424 mempool_free(io_req, fnic->io_req_pool);
2425 }
2426
2427 fnic_device_reset_end:
2428 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
2429 jiffies_to_msecs(jiffies - start_time),
2430 0, ((u64)sc->cmnd[0] << 32 |
2431 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2432 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2433 fnic_flags_and_state(sc));
2434
2435 /* free the tag if one was allocated here */
2436 if (unlikely(tag_gen_flag))
2437 fnic_scsi_host_end_tag(fnic, sc);
2438
2439 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2440 "Returning from device reset %s\n",
2441 (ret == SUCCESS) ?
2442 "SUCCESS" : "FAILED");
2443
2444 if (ret == FAILED)
2445 atomic64_inc(&reset_stats->device_reset_failures);
2446
2447 return ret;
2448 }
2449
2450 /* Clean up all IOs and reset the libFC local port */
2451 int fnic_reset(struct Scsi_Host *shost)
2452 {
2453 struct fc_lport *lp;
2454 struct fnic *fnic;
2455 int ret = 0;
2456 struct reset_stats *reset_stats;
2457
2458 lp = shost_priv(shost);
2459 fnic = lport_priv(lp);
2460 reset_stats = &fnic->fnic_stats.reset_stats;
2461
2462 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2463 "fnic_reset called\n");
2464
2465 atomic64_inc(&reset_stats->fnic_resets);
2466
2467 /*
2468 * Reset the local port; this cleans up libFC exchanges, resets the
2469 * remote port sessions, and, if the link is up, begins FLOGI.
2470 */
2471 ret = fc_lport_reset(lp);
2472
2473 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2474 "Returning from fnic reset %s\n",
2475 (ret == 0) ?
2476 "SUCCESS" : "FAILED");
2477
2478 if (ret == 0)
2479 atomic64_inc(&reset_stats->fnic_reset_completions);
2480 else
2481 atomic64_inc(&reset_stats->fnic_reset_failures);
2482
2483 return ret;
2484 }
2485
2486 /*
2487 * SCSI error handling calls the driver's eh_host_reset when all prior
2488 * error handling levels return FAILED.  If the host reset completes
2489 * successfully, and if the link is up, fabric login begins.
2490 *
2491 * Host reset is the highest level of error recovery.  If this fails,
2492 * the host is taken offline by SCSI.
2493 *
2494 */
2495 int fnic_host_reset(struct scsi_cmnd *sc)
2496 {
2497 int ret;
2498 unsigned long wait_host_tmo;
2499 struct Scsi_Host *shost = sc->device->host;
2500 struct fc_lport *lp = shost_priv(shost);
2501 struct fnic *fnic = lport_priv(lp);
2502 unsigned long flags;
2503
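/* Allow only one host reset to run at a time */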
2504 spin_lock_irqsave(&fnic->fnic_lock, flags);
2505 if (!fnic->internal_reset_inprogress) {
2506 fnic->internal_reset_inprogress = true;
2507 } else {
2508 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2509 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2510 "host reset in progress skipping another host reset\n");
2511 return SUCCESS;
2512 }
2513 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2514
2515 /*
2516 * If fnic_reset is successful, wait for fabric login to complete:
2517 * scsi-ml sends a TUR to every device after a successful host reset,
2518 * so the fabric should be up before returning to SCSI.
2519 */
2520 ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
2521 if (ret == SUCCESS) {
2522 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2523 ret = FAILED;
2524 while (time_before(jiffies, wait_host_tmo)) {
2525 if ((lp->state == LPORT_ST_READY) &&
2526 (lp->link_up)) {
2527 ret = SUCCESS;
2528 break;
2529 }
2530 ssleep(1);
2531 }
2532 }
2533
2534 spin_lock_irqsave(&fnic->fnic_lock, flags);
2535 fnic->internal_reset_inprogress = false;
2536 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2537 return ret;
2538 }
2539
2540 /*
2541 * This function is called from libFC when the host is being removed.
2542 */
2543 void fnic_scsi_abort_io(struct fc_lport *lp)
2544 {
2545 int err = 0;
2546 unsigned long flags;
2547 enum fnic_state old_state;
2548 struct fnic *fnic = lport_priv(lp);
2549 DECLARE_COMPLETION_ONSTACK(remove_wait);
2550
2551 /* Issue a firmware reset for the fnic and wait for the reset to complete */
2552 retry_fw_reset:
2553 spin_lock_irqsave(&fnic->fnic_lock, flags);
2554 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
2555 fnic->link_events) {
2556 /* firmware reset is in progress; poll for its completion */
2557 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2558 schedule_timeout(msecs_to_jiffies(100));
2559 goto retry_fw_reset;
2560 }
2561
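/* Publish remove_wait so the firmware-reset completion handler can wake us */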
2562 fnic->remove_wait = &remove_wait;
2563 old_state = fnic->state;
2564 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2565 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2566 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2567
2568 err = fnic_fw_reset_handler(fnic);
2569 if (err) {
2570 spin_lock_irqsave(&fnic->fnic_lock, flags);
2571 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2572 fnic->state = old_state;
2573 fnic->remove_wait = NULL;
2574 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2575 return;
2576 }
2577
2578 /* Wait for the firmware reset to complete */
2579 wait_for_completion_timeout(&remove_wait,
2580 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
2581
2582 spin_lock_irqsave(&fnic->fnic_lock, flags);
2583 fnic->remove_wait = NULL;
2584 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2585 "fnic_scsi_abort_io %s\n",
2586 (fnic->state == FNIC_IN_ETH_MODE) ?
2587 "SUCCESS" : "FAILED");
2588 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2589
2590 }
2591
2592 /*
2593 * This function is called from libFC to clean up driver IO state on link down.
2594 */
2595 void fnic_scsi_cleanup(struct fc_lport *lp)
2596 {
2597 unsigned long flags;
2598 enum fnic_state old_state;
2599 struct fnic *fnic = lport_priv(lp);
2600
2601 /* issue a firmware reset */
2602 retry_fw_reset:
2603 spin_lock_irqsave(&fnic->fnic_lock, flags);
2604 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
2605 /* firmware reset is in progress; poll for its completion */
2606 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2607 schedule_timeout(msecs_to_jiffies(100));
2608 goto retry_fw_reset;
2609 }
2610 old_state = fnic->state;
2611 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
2612 fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
2613 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2614
2615 if (fnic_fw_reset_handler(fnic)) {
2616 spin_lock_irqsave(&fnic->fnic_lock, flags);
2617 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
2618 fnic->state = old_state;
2619 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2620 }
2621
2622 }
2623
2624 void fnic_empty_scsi_cleanup(struct fc_lport *lp)
2625 {
2626 }
2627
2628 void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
2629 {
2630 struct fnic *fnic = lport_priv(lp);
2631
2632 /* Non-zero sid: nothing to do here */
2633 if (sid)
2634 goto call_fc_exch_mgr_reset;
2635
2636 if (did) {
2637 fnic_rport_exch_reset(fnic, did);
2638 goto call_fc_exch_mgr_reset;
2639 }
2640
2641 /*
2642 * sid = 0, did = 0:
2643 * link down or device being removed
2644 */
2645 if (!fnic->in_remove)
2646 fnic_scsi_cleanup(lp);
2647 else
2648 fnic_scsi_abort_io(lp);
2649
2650 /* call the libFC exchange manager reset to reset its exchanges */
2651 call_fc_exch_mgr_reset:
2652 fc_exch_mgr_reset(lp, sid, did);
2653
2654 }
2655
2656 static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
2657 {
2658 struct fnic_pending_aborts_iter_data *iter_data = data;
2659 struct fnic *fnic = iter_data->fnic;
2660 int cmd_state;
2661 struct fnic_io_req *io_req;
2662 spinlock_t *io_lock;
2663 unsigned long flags;
2664
2665 /*
2666 * Ignore the LUN reset command itself and any commands that do not
2667 * belong to this LUN.
2668 */
2669 if (iter_data->lr_sc && sc == iter_data->lr_sc)
2670 return true;
2671 if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
2672 return true;
2673
2674 io_lock = fnic_io_lock_hash(fnic, sc);
2675 spin_lock_irqsave(io_lock, flags);
2676
2677 io_req = fnic_priv(sc)->io_req;
2678 if (!io_req) {
2679 spin_unlock_irqrestore(io_lock, flags);
2680 return true;
2681 }
2682
2683 /*
2684 * Found an IO that is still pending with the firmware and belongs
2685 * to the LUN that is being reset.
2686 */
2687 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
2688 "Found IO in %s on lun\n",
2689 fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2690 cmd_state = fnic_priv(sc)->state;
2691 spin_unlock_irqrestore(io_lock, flags);
2692 if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
2693 iter_data->ret = 1;
2694
2695 return iter_data->ret ? false : true;
2696 }
2697
2698 /*
2699 * fnic_is_abts_pending() walks the outstanding commands to check whether
2700 * any IOs are still pending in the firmware: it returns 1 (true) if at
2701 * least one abort is pending, otherwise 0 (false).
2702 * If this function returns true, the task management request should not
2703 * report success.
2704 */
2705 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
2706 {
2707 struct fnic_pending_aborts_iter_data iter_data = {
2708 .fnic = fnic,
2709 .lun_dev = NULL,
2710 .ret = 0,
2711 };
2712
2713 if (lr_sc) {
2714 iter_data.lun_dev = lr_sc->device;
2715 iter_data.lr_sc = lr_sc;
2716 }
2717
2718 /* walk the IOs again to check if any are still pending in the firmware */
2719 scsi_host_busy_iter(fnic->lport->host,
2720 fnic_abts_pending_iter, &iter_data);
2721
2722 return iter_data.ret;
2723 }