#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "snic_io.h"
#include "snic.h"

#define snic_cmd_tag(sc)	(scsi_cmd_to_rq(sc)->tag)

const char *snic_state_str[] = {
	[SNIC_INIT]	= "SNIC_INIT",
	[SNIC_ERROR]	= "SNIC_ERROR",
	[SNIC_ONLINE]	= "SNIC_ONLINE",
	[SNIC_OFFLINE]	= "SNIC_OFFLINE",
	[SNIC_FWRESET]	= "SNIC_FWRESET",
};

static const char * const snic_req_state_str[] = {
	[SNIC_IOREQ_NOT_INITED]	= "SNIC_IOREQ_NOT_INITED",
	[SNIC_IOREQ_PENDING]	= "SNIC_IOREQ_PENDING",
	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
	[SNIC_IOREQ_LR_PENDING]	= "SNIC_IOREQ_LR_PENDING",
	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
	[SNIC_IOREQ_COMPLETE]	= "SNIC_IOREQ_CMD_COMPLETE",
};

static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS]	= "SNIC_STAT_IO_SUCCESS",
	[SNIC_STAT_INVALID_HDR]	= "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES]	= "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP]	= "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED]	= "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT]	= "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID]	= "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR]	= "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT]	= "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL]	= "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT]	= "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE]	= "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN]	= "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR]	= "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY]	= "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR]	= "SNIC_STAT_FATAL_ERROR",
};

static void snic_scsi_cleanup(struct snic *, int);

const char *
snic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
		return "Unknown";

	return snic_state_str[state];
}

static const char *
snic_io_status_to_str(unsigned int state)
{
	if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
	    (!snic_io_status_str[state]))
		return "Unknown";

	return snic_io_status_str[state];
}

static const char *
snic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_req_state_str) ||
	    !snic_req_state_str[state])
		return "Unknown";

	return snic_req_state_str[state];
}

static inline spinlock_t *
snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
{
	u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);

	return &snic->io_req_lock[hash];
}

static inline spinlock_t *
snic_io_lock_tag(struct snic *snic, int tag)
{
	return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
}

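/*
 * snic_release_req_buf : Releases snic_req_info
 */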
static void
snic_release_req_buf(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	if (req->u.icmnd.sense_addr)
		dma_unmap_single(&snic->pdev->dev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 DMA_FROM_DEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
}

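/*
 * snic_queue_icmnd_req : Queues snic_icmnd request
 */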
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		}
	}

	pa = dma_map_single(&snic->pdev->dev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid,
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	atomic64_inc(&snic->s_stats.io.active);
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		atomic64_dec(&snic->s_stats.io.active);
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);
	} else
		snic_stats_update_active_ios(&snic->s_stats);

	return ret;
}

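/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 */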
static int
snic_issue_scsi_req(struct snic *snic,
		    struct snic_tgt *tgt,
		    struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;

	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));

		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		scsi_dma_unmap(sc);
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi->tgt_id = tgt->id;
	rqi->sc = sc;

	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);

	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);

		if (rqi)
			snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;

		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);

		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);

		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}

issue_sc_end:
	return ret;
}

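/*
 * snic_queuecommand : SCSI mid-layer entry point to issue a SCSI
 * command to the target.
 */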
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;

	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		scsi_done(sc);

		return 0;
	}

	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);

		return SCSI_MLQUEUE_HOST_BUSY;
	}
	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);

	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
}

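/*
 * snic_proc_tmreq_pending_state : Called when an icmnd completion arrives
 * while a TM request (abort or LUN reset) is pending on the command;
 * updates the command flags accordingly.
 */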
static void
snic_proc_tmreq_pending_state(struct snic *snic,
			      struct scsi_cmnd *sc,
			      u8 cmpl_status)
{
	int state = CMD_STATE(sc);

	if (state == SNIC_IOREQ_ABTS_PENDING)
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
	else if (state == SNIC_IOREQ_LR_PENDING)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
	else
		SNIC_BUG_ON(1);

	switch (cmpl_status) {
	case SNIC_STAT_IO_SUCCESS:
		CMD_FLAGS(sc) |= SNIC_IO_DONE;
		break;

	case SNIC_STAT_ABORTED:
		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
		break;

	default:
		SNIC_BUG_ON(1);
	}
}

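/*
 * snic_process_io_failed_state : Maps a firmware completion status of a
 * failed IO to a SCSI mid-layer host byte and sets sc->result.
 */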
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH: /* Recv/Sent more/less data than expected */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES:	/* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested IO was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to sgl error */
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;

	case SNIC_STAT_NOT_READY:	/* sNIC subsystem is not ready */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Req contains invalid parameters */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param or Req Not Supported or Cmnd Rejected or Device Offline or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
}

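/*
 * snic_tmreq_pending : Returns true if a TM request (abort or LUN reset)
 * is pending on the given command.
 */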
static int
snic_tmreq_pending(struct scsi_cmnd *sc)
{
	int state = CMD_STATE(sc);

	return ((state == SNIC_IOREQ_ABTS_PENDING) ||
		(state == SNIC_IOREQ_LR_PENDING));
}

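/*
 * snic_process_icmnd_cmpl_status : Caller should hold io_lock.
 * Updates sc->result and stats based on firmware completion status;
 * returns non-zero if the IO failed.
 */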
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;

	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;

	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;

		xfer_len = scsi_bufflen(sc);

		/* Update residual count */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));

		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);

		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}

	return ret;
}

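/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */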
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl:lun %lld sc %p cmd %x tag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return;
	}

	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
	WARN_ON_ONCE(!rqi);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));
		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * set completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, cmnd_id, sc->cmnd[0],
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* update IO stats */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
}

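/*
 * snic_proc_dr_cmpl_locked : Processes device reset completion.
 * Caller should hold io_lock.
 */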
static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;

	CMD_LR_STATUS(sc) = cmpl_stat;

	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	/* Dev Reset completion received after it timed out */
	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));

	if (rqi->dr_done)
		complete(rqi->dr_done);
}

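/*
 * snic_update_abort_stats : Updates abort stats based on completion status.
 */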
static void
snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
{
	struct snic_abort_stats *abt_stats = &snic->s_stats.abts;

	SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");

	switch (cmpl_stat) {
	case SNIC_STAT_IO_SUCCESS:
		break;

	case SNIC_STAT_TIMEOUT:
		atomic64_inc(&abt_stats->fw_tmo);
		break;

	case SNIC_STAT_IO_NOT_FOUND:
		atomic64_inc(&abt_stats->io_not_found);
		break;

	default:
		atomic64_inc(&abt_stats->fail);
		break;
	}
}

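/*
 * snic_process_itmf_cmpl : Processes itmf completions.
 */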
static int
snic_process_itmf_cmpl(struct snic *snic,
		       struct snic_fw_req *fwreq,
		       u32 cmnd_id,
		       u8 cmpl_stat,
		       struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	u32 tm_tags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return ret;
	}
	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		return ret;
	}

	/* Extract task management flags */
	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);

	start_time = rqi->start_time;
	cmnd_id &= (SNIC_TAG_MASK);

	switch (tm_tags) {
	case SNIC_TAG_ABORT:
		/* Abort only issued on cmd */
		snic_update_abort_stats(snic, cmpl_stat);

		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it. */
			ret = -1;
			spin_unlock_irqrestore(io_lock, flags);
			break;
		}

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
			      cmnd_id,
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		/*
		 * If scsi_eh thread is blocked waiting for abts complete,
		 * signal completion to it. IO will be cleaned in the thread,
		 * else cleanup the IO here.
		 */
		if (rqi->abts_done) {
			complete(rqi->abts_done);
			spin_unlock_irqrestore(io_lock, flags);

			break; /* jump out & release the lock */
		}

		CMD_SP(sc) = NULL;
		sc->result = (DID_ERROR << 16);
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
			      sc, CMD_FLAGS(sc));

		spin_unlock_irqrestore(io_lock, flags);

		snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, SNIC_TRC_CMD(sc),
			 SNIC_TRC_CMD_STATE_FLAGS(sc));

		scsi_done(sc);

		break;

	case SNIC_TAG_DEV_RST:
	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
		spin_unlock_irqrestore(io_lock, flags);
		ret = 0;

		break;

	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
		/* Abort for Device Reset */
		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
			      cmnd_id, snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		if (rqi->abts_done)
			complete(rqi->abts_done);

		spin_unlock_irqrestore(io_lock, flags);

		break;

	default:
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);

		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      cmnd_id,
			      CMD_FLAGS(sc));
		ret = -1;
		SNIC_BUG_ON(1);

		break;
	}

	return ret;
}

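/*
 * snic_itmf_cmpl_handler
 * Routine to handle itmf completions
 */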
static void
snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	struct snic_itmf_cmpl *itmf_cmpl = NULL;
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);

	itmf_cmpl = &fwreq->u.itmf_cmpl;
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: nterm %u, flags 0x%x\n",
		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);

	/* spl case, dev reset issued through ioctl */
	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;

		goto ioctl_dev_rst;
	}

	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);

ioctl_dev_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);

		return;
	}

	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
}

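/*
 * snic_hba_reset_scsi_cleanup : Cleans up outstanding IOs after an HBA
 * reset and fixes up the active IO and firmware request stats.
 */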
static void
snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_stats *st = &snic->s_stats;
	long act_ios = 0, act_fwreqs = 0;

	SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
	snic_scsi_cleanup(snic, snic_cmd_tag(sc));

	/* Update stats on pending IOs */
	act_ios = atomic64_read(&st->io.active);
	atomic64_add(act_ios, &st->io.compl);
	atomic64_sub(act_ios, &st->io.active);

	act_fwreqs = atomic64_read(&st->fw.actv_reqs);
	atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
}

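/*
 * snic_hba_reset_cmpl_handler :
 *
 * Notes :
 * 1. Cleanup all the scsi cmds, release all snic specific cmds
 * 2. Issue Report Targets in case of SAN targets
 */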
static int
snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags, gflags;
	int ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
		       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));

	SNIC_SCSI_DBG(snic->shost,
		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* spl case, host reset issued through ioctl */
	if (cmnd_id == SCSI_NO_TAG) {
		rqi = (struct snic_req_info *) ctx;
		SNIC_HOST_INFO(snic->shost,
			       "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
			       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
		sc = rqi->sc;

		goto ioctl_hba_rst;
	}

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return 1;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
ioctl_hba_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);
		ret = 1;

		return ret;
	}

	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
		       sc, rqi, cmnd_id, CMD_FLAGS(sc));

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);

	if (!snic->remove_wait) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl:host reset completed after timeout\n");
		ret = 1;

		return ret;
	}

	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		ret = 1;

		return ret;
	}

	spin_unlock_irqrestore(io_lock, flags);

	/* scsi cleanup */
	snic_hba_reset_scsi_cleanup(snic, sc);

	SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
		    snic_get_state(snic) != SNIC_FWRESET);

	/* Careful locking between snic_lock and io lock */
	spin_lock_irqsave(io_lock, flags);
	spin_lock_irqsave(&snic->snic_lock, gflags);
	if (snic_get_state(snic) == SNIC_FWRESET)
		snic_set_state(snic, SNIC_ONLINE);
	spin_unlock_irqrestore(&snic->snic_lock, gflags);

	if (snic->remove_wait)
		complete(snic->remove_wait);

	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);

	ret = 0;

	if (snic->config.xpt_type == SNIC_DAS)
		return ret;

	SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
	queue_work(snic_glob->event_q, &snic->disc_work);

	return ret;
}

static void
snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");

	SNIC_ASSERT_NOT_IMPL(1);
}

static void
snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct snic_async_evnotify *aen = &fwreq->u.async_ev;
	u32 event_id = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	event_id = le32_to_cpu(aen->ev_id);

	switch (event_id) {
	case SNIC_EV_TGT_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_CONF_CHG:
		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
		break;

	case SNIC_EV_TGT_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
		break;

	case SNIC_EV_LUN_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
		break;

	case SNIC_EV_DISC_CMPL:
		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_ASSERT_NOT_IMPL(1);
}

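/*
 * snic_io_cmpl_handler
 * Routine to process CQ entries (IO Completions) posted by fw.
 */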
static int
snic_io_cmpl_handler(struct vnic_dev *vdev,
		     unsigned int cq_idx,
		     struct snic_fw_req *fwreq)
{
	struct snic *snic = svnic_dev_priv(vdev);
	u64 start = jiffies, cmpl_time;

	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));

	/* Update FW related stats */
	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
	    (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
		atomic64_dec(&snic->s_stats.fw.actv_reqs);

	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));

	/* Check for snic subsys errors */
	switch (fwreq->hdr.status) {
	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem is NOT Ready.\n");
		break;

	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem in Unrecoverable State.\n");
		break;
	}

	switch (fwreq->hdr.type) {
	case SNIC_RSP_EXCH_VER_CMPL:
		snic_io_exch_ver_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		snic_report_tgt_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ICMND_CMPL:
		snic_icmnd_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ITMF_CMPL:
		snic_itmf_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		snic_hba_reset_cmpl_handler(snic, fwreq);
		break;

	case SNIC_MSG_ACK:
		snic_msg_ack_handler(snic, fwreq);
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		snic_aen_handler(snic, fwreq);
		break;

	default:
		SNIC_BUG_ON(1);
		SNIC_SCSI_DBG(snic->shost,
			      "Unknown Firmware completion request type %d\n",
			      fwreq->hdr.type);
		break;
	}

	/* Update Stats */
	cmpl_time = jiffies - start;
	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);

	return 0;
}

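/*
 * snic_fwcq_cmpl_handler
 * Routine to process fwCQ
 * This CQ is independent, and not associated with wq/rq/wq_copy queues
 */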
int
snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
{
	unsigned int num_ent = 0;	/* number of cq entries processed */
	unsigned int cq_idx;
	unsigned int nent_per_cq;
	struct snic_misc_stats *misc_stats = &snic->s_stats.misc;

	for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
		nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
						 snic_io_cmpl_handler,
						 io_cmpl_work);
		num_ent += nent_per_cq;

		if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
			atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
	}

	return num_ent;
}

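/*
 * snic_queue_itmf_req : Queues itmf request to firmware.
 */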
static int
snic_queue_itmf_req(struct snic *snic,
		    struct snic_host_req *tmreq,
		    struct scsi_cmnd *sc,
		    u32 tmf,
		    u32 req_id)
{
	struct snic_req_info *rqi = req_to_rqi(tmreq);
	struct scsi_lun lun;
	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
	int ret = 0;

	SNIC_BUG_ON(!rqi);
	SNIC_BUG_ON(!rqi->tm_tag);

	/* fill in lun info */
	int_to_scsilun(sc->device->lun, &lun);

	/* Initialize snic_host_req: itmf */
	snic_itmf_init(tmreq,
		       tm_tag,
		       snic->config.hid,
		       (ulong) rqi,
		       0 /* flags */,
		       req_id, /* Command to be aborted */
		       rqi->tgt_id,
		       lun.scsi_lun,
		       tmf);

	/*
	 * In case of multiple aborts on same cmd,
	 * use try_wait_for_completion and completion_done() to check
	 * whether it queues aborts even after completion of abort issued
	 * earlier. SNIC_BUG_ON(completion_done(&rqi->done));
	 */

	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
	else
		SNIC_SCSI_DBG(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));

	return ret;
}

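/*
 * snic_issue_tm_req : Builds and queues a TM request (abort or LUN reset)
 * for the given command.
 */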
static int
snic_issue_tm_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc,
		  int tmf)
{
	struct snic_host_req *tmreq = NULL;
	int req_id = 0, tag = snic_cmd_tag(sc);
	int ret = 0;

	if (snic_get_state(snic) == SNIC_FWRESET)
		return -EBUSY;

	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(snic->shost,
		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
		      tmf, rqi, tag);

	if (tmf == SNIC_ITMF_LUN_RESET) {
		tmreq = snic_dr_req_init(snic, rqi);
		req_id = SCSI_NO_TAG;
	} else {
		tmreq = snic_abort_req_init(snic, rqi);
		req_id = tag;
	}

	if (!tmreq) {
		ret = -ENOMEM;

		goto tmreq_err;
	}

	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);

tmreq_err:
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
			      tmf, sc, rqi, req_id, tag, ret);
	} else {
		SNIC_SCSI_DBG(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
			      tmf, sc, rqi, req_id, tag);
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
}

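/*
 * snic_queue_abort_req : Queues abort req to WQ
 */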
static int
snic_queue_abort_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int tmf)
{
	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
		      sc, rqi, snic_cmd_tag(sc), tmf);

	/* Add special tag for abort */
	rqi->tm_tag |= SNIC_TAG_ABORT;

	return snic_issue_tm_req(snic, rqi, sc, tmf);
}

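/*
 * snic_abort_finish : called by snic_abort_cmd to check status of abort
 */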
static int
snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;

		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
			      tag, sc, CMD_FLAGS(sc));
		ret = FAILED;

		goto abort_fail;
	}

	rqi->abts_done = NULL;

	ret = FAILED;

	/* Check the abort status. */
	switch (CMD_ABTS_STATUS(sc)) {
	case SNIC_INVALID_CODE:
		/* Firmware didn't complete abort req, timedout */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
		atomic64_inc(&snic->s_stats.abts.drv_tmo);
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
		/* do not release snic request in timedout case */
		rqi = NULL;

		goto abort_fail;

	case SNIC_STAT_IO_SUCCESS:
	case SNIC_STAT_IO_NOT_FOUND:
		ret = SUCCESS;
		/*
		 * If abort path doesn't call scsi_done(),
		 * the # IO timeouts == 2, will cause the LUN offline.
		 * Call scsi_done to complete the IO.
		 */
		sc->result = (DID_ERROR << 16);
		scsi_done(sc);
		break;

	default:
		/* Firmware completed abort with error */
		ret = FAILED;
		rqi = NULL;
		break;
	}

	CMD_SP(sc) = NULL;
	SNIC_HOST_INFO(snic->shost,
		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
		       CMD_FLAGS(sc));

abort_fail:
	spin_unlock_irqrestore(io_lock, flags);
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	return ret;
}

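/*
 * snic_send_abort_and_wait : Issues Abort, and Waits
 */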
static int
snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	struct snic_tgt *tgt = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	io_lock = snic_io_lock_hash(snic, sc);

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by fw_cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion won't actually complete the command
	 * and it will be considered as an aborted command.
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));

		ret = SUCCESS;

		goto send_abts_end;
	}

	rqi->abts_done = &tm_done;
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);

		ret = 0;
		goto abts_pending;
	}
	SNIC_BUG_ON(!rqi->abts_done);

	/* Save Command State, to be restored if queuing fails. */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it.
	 * If the fw completes the command after this point,
	 * the completion won't be done till mid-layer, since abort
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now Queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		atomic64_inc(&snic->s_stats.abts.q_fail);
		SNIC_HOST_ERR(snic->shost,
			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore Command's previous state */
		CMD_STATE(sc) = sv_state;
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_abts_end;
	}

	spin_lock_irqsave(io_lock, flags);
	if (tmf == SNIC_ITMF_ABTS_TASK) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
		atomic64_inc(&snic->s_stats.abts.num);
	} else {
		/* term stats */
		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
	}
	spin_unlock_irqrestore(io_lock, flags);

	SNIC_SCSI_DBG(snic->shost,
		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
		      sc, tag, CMD_FLAGS(sc));

	ret = 0;

abts_pending:
	/*
	 * Queued an abort IO, wait for its completion.
	 * Once the fw completes the abort command, it will
	 * wake up this thread.
	 */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

send_abts_end:
	return ret;
}

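/*
 * snic_abort_cmd : SCSI Mid-Layer's abort entry point.
 */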
int
snic_abort_cmd(struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(sc->device->host);
	int ret = SUCCESS, tag = snic_cmd_tag(sc);
	u32 start_time = jiffies;

	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);

	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: tag %x Parent Devs are not rdy\n",
			      tag);
		ret = FAST_IO_FAIL;

		goto abort_end;
	}

	ret = snic_send_abort_and_wait(snic, sc);
	if (ret)
		goto abort_end;

	ret = snic_abort_finish(snic, sc);

abort_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), 0,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "abts: Abort Req Status = %s\n",
		      (ret == SUCCESS) ? "SUCCESS" :
		      ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));

	return ret;
}

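/*
 * snic_is_abts_pending : Checks whether aborts are still pending with
 * firmware on the LUN associated with lr_sc (or any LUN if lr_sc is NULL).
 */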
static int
snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	struct scsi_device *lr_sdev = NULL;
	spinlock_t *io_lock = NULL;
	u32 tag;
	unsigned long flags;

	if (lr_sc)
		lr_sdev = lr_sc->device;

	/* walk through the tag map, and check if IOs are still pending in fw */
	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);

		if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		/*
		 * Found IO that is still pending with firmware and belongs to
		 * the LUN that is under reset, if lr_sc != NULL
		 */
		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);

			return 1;
		}

		spin_unlock_irqrestore(io_lock, flags);
	}

	return 0;
}

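/*
 * snic_dr_clean_single_req : Aborts a single IO, on the LUN under reset,
 * that is still pending with firmware, and waits for the abort completion.
 */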
static int
snic_dr_clean_single_req(struct snic *snic,
			 u32 tag,
			 struct scsi_device *lr_sdev)
{
	struct snic_req_info *rqi = NULL;
	struct snic_tgt *tgt = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	u32 sv_state = 0, tmf = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0;

	io_lock = snic_io_lock_tag(snic, tag);
	spin_lock_irqsave(io_lock, flags);
	sc = scsi_host_find_tag(snic->shost, tag);

	/* Ignore cmds not on the LUN under reset */
	if (!sc || sc->device != lr_sdev)
		goto skip_clean;

	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		goto skip_clean;

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_clean;

	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req: devrst is not pending sc 0x%p\n",
			      sc);

		goto skip_clean;
	}

	SNIC_SCSI_DBG(snic->shost,
		      "clean_single_req: Found IO in %s on lun\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* Save Command State */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it.
	 * If the fw completes the command after this point,
	 * the completion won't reach the mid-layer, since abort
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	SNIC_BUG_ON(rqi->abts_done);

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		rqi->tm_tag = SNIC_TAG_DEV_RST;

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req:devrst sc 0x%p\n", sc);
	}

	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	rqi->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* Now queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;

		/* Restore Command State */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		ret = 1;
		goto skip_clean;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;

	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

	/* Recheck cmd state to check if it is now aborted. */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		goto skip_clean;
	}
	rqi->abts_done = NULL;

	/* if abort is still pending with fw, fail */
	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
		ret = 1;

		goto skip_clean;
	}

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	snic_release_req_buf(snic, rqi, sc);

	sc->result = (DID_ERROR << 16);
	scsi_done(sc);

	ret = 0;

	return ret;

skip_clean:
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
}

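/*
 * snic_dr_clean_pending_req : Cleans up IOs pending on the LUN under reset,
 * except the LUN reset command itself.
 */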
static int
snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct scsi_device *lr_sdev = lr_sc->device;
	u32 tag = 0;
	int ret = FAILED;

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		if (tag == snic_cmd_tag(lr_sc))
			continue;

		ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
		if (ret) {
			SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);

			goto clean_err;
		}
	}

	schedule_timeout(msecs_to_jiffies(100));

	/* Walk through all the cmds and check abts status. */
	if (snic_is_abts_pending(snic, lr_sc)) {
		ret = FAILED;

		goto clean_err;
	}

	ret = 0;
	SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");

	return ret;

clean_err:
	ret = FAILED;
	SNIC_HOST_ERR(snic->shost,
		      "Failed to Clean Pending IOs on %s device.\n",
		      dev_name(&lr_sdev->sdev_gendev));

	return ret;
}

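/*
 * snic_dr_finish : Called by snic_device_reset
 */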
static int
snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int lr_res = 0;
	int ret = FAILED;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));

		ret = FAILED;
		goto dr_fini_end;
	}

	rqi->dr_done = NULL;

	lr_res = CMD_LR_STATUS(sc);

	switch (lr_res) {
	case SNIC_INVALID_CODE:
		/* driver timed out waiting for device reset completion */
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset Timedout. flags 0x%llx\n",
			      snic_cmd_tag(sc), CMD_FLAGS(sc));

		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
		ret = FAILED;

		goto dr_failed;

	case SNIC_STAT_IO_SUCCESS:
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Tag %x Dev Reset cmpl\n",
			      snic_cmd_tag(sc));
		ret = 0;
		break;

	default:
		SNIC_HOST_ERR(snic->shost,
			      "dr_fini:Device Reset completed & failed.Tag = %x lr_status %s flags 0x%llx\n",
			      snic_cmd_tag(sc),
			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
		ret = FAILED;
		goto dr_failed;
	}
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Cleanup any IOs on this LUN that have still not completed.
	 * If any of these fail, then LUN Reset fails.
	 * Cleanup cleans all commands on this LUN except
	 * the lun reset command. If all cmds get cleaned, the LUN Reset
	 * succeeds.
	 */

	ret = snic_dr_clean_pending_req(snic, sc);
	if (ret) {
		spin_lock_irqsave(io_lock, flags);
		SNIC_SCSI_DBG(snic->shost,
			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
			      snic_cmd_tag(sc));
		rqi = (struct snic_req_info *) CMD_SP(sc);

		goto dr_failed;
	} else {
		/* Cleanup LUN Reset Command */
		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			ret = SUCCESS;
		else
			ret = FAILED;
	}

dr_failed:
	lockdep_assert_held(io_lock);
	if (rqi)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

dr_fini_end:
	return ret;
}

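/*
 * snic_queue_dr_req : Queues device reset req to firmware.
 */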
static int
snic_queue_dr_req(struct snic *snic,
		  struct snic_req_info *rqi,
		  struct scsi_cmnd *sc)
{
	/* Add special tag for device reset */
	rqi->tm_tag |= SNIC_TAG_DEV_RST;

	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
}

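/*
 * snic_send_dr_and_wait : Issues a device/LUN reset and waits for its
 * completion.
 */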
static int
snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int ret = FAILED, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));
		spin_unlock_irqrestore(io_lock, flags);

		ret = FAILED;
		goto send_dr_end;
	}

	/* Save Command state to restore in case queuing fails. */
	sv_state = CMD_STATE(sc);

	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);

	rqi->dr_done = &tm_done;
	SNIC_BUG_ON(!rqi->dr_done);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now queue the device reset command to firmware */
	ret = snic_queue_dr_req(snic, rqi, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore State */
		CMD_STATE(sc) = sv_state;
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->dr_done = NULL;

		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_dr_end;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	ret = 0;

	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);

send_dr_end:
	return ret;
}

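/*
 * snic_dev_reset_supported : Checks whether LUN reset is supported on the
 * target; returns 0 (not supported) for DAS targets.
 */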
static int
snic_dev_reset_supported(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (tgt->tdata.typ == SNIC_TGT_DAS)
		return 0;

	return 1;
}

static void
snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = jiffies;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (rqi) {
		start_time = rqi->start_time;
		CMD_SP(sc) = NULL;
	}

	CMD_FLAGS(sc) |= flag;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
}

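/*
 * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI
 * command on the LUN.
 */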
int
snic_device_reset(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	struct snic *snic = shost_priv(shost);
	struct snic_req_info *rqi = NULL;
	int tag = snic_cmd_tag(sc);
	int start_time = jiffies;
	int ret = FAILED;
	int dr_supp = 0;

	SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
		      snic_cmd_tag(sc));
	dr_supp = snic_dev_reset_supported(sc->device);
	if (!dr_supp) {
		/* device reset op is not supported */
		SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
		snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);

		goto dev_rst_end;
	}

	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		snic_unlink_and_release_req(snic, sc, 0);
		SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");

		goto dev_rst_end;
	}

	/* There is no tag when lun reset is issued through ioctl. */
	if (unlikely(tag <= SNIC_NO_TAG)) {
		SNIC_HOST_INFO(snic->shost,
			       "Devrst: LUN Reset Recvd thru IOCTL.\n");

		rqi = snic_req_init(snic, 0);
		if (!rqi)
			goto dev_rst_end;

		memset(scsi_cmd_priv(sc), 0,
		       sizeof(struct snic_internal_io_state));
		CMD_SP(sc) = (char *)rqi;
		CMD_FLAGS(sc) = SNIC_NO_FLAGS;

		/* Add special tag for dr coming from user spc */
		rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
		rqi->sc = sc;
	}

	ret = snic_send_dr_and_wait(snic, sc);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
			      tag, ret);

		snic_unlink_and_release_req(snic, sc, 0);

		goto dev_rst_end;
	}

	ret = snic_dr_finish(snic, sc);

dev_rst_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time),
		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "Devrst: Returning from Device Reset : %s\n",
		      (ret == SUCCESS) ? "SUCCESS" : "FAILED");

	return ret;
}

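/*
 * snic_issue_hba_reset : Queues an HBA reset request to firmware and waits
 * for its completion.
 */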
static int
snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	int ret = -ENOMEM;

	rqi = snic_req_init(snic, 0);
	if (!rqi)
		goto hba_rst_end;

	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
		memset(scsi_cmd_priv(sc), 0,
			sizeof(struct snic_internal_io_state));
		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
		rqi->sc = sc;
	}

	req = rqi_to_req(rqi);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	SNIC_BUG_ON(CMD_SP(sc) != NULL);
	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
	snic->remove_wait = &wait;
	spin_unlock_irqrestore(io_lock, flags);

	/* Encode the request header and queue the reset to firmware. */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
			snic->config.hid, 0, (ulong) rqi);

	req->u.reset.flags = 0;

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_hr:Queuing HBA Reset Failed. w err %d\n",
			      ret);

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_resets);
	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");

	wait_for_completion_timeout(snic->remove_wait,
				    SNIC_HOST_RESET_TIMEOUT);

	/* Still in FWRESET after the wait means the completion never came. */
	if (snic_get_state(snic) == SNIC_FWRESET) {
		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
		ret = -ETIMEDOUT;

		goto hba_rst_err;
	}

	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

	return 0;

hba_rst_err:
	spin_lock_irqsave(io_lock, flags);
	snic->remove_wait = NULL;
	rqi = (struct snic_req_info *) CMD_SP(sc);
	CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	if (rqi)
		snic_req_free(snic, rqi);

hba_rst_end:
	SNIC_HOST_ERR(snic->shost,
		      "reset:HBA Reset Failed w/ err = %d.\n",
		      ret);

	return ret;
}
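
/*
 * A sketch (not code in this file) of the other half of the handshake
 * above: the firmware reset-completion path is expected to wake the waiter
 * parked on snic->remove_wait, roughly
 *
 *	if (snic->remove_wait)
 *		complete(snic->remove_wait);
 *
 * after moving the snic out of the SNIC_FWRESET state, which is why
 * snic_issue_hba_reset() treats "still in SNIC_FWRESET after the wait"
 * as a timeout.
 */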
int
snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(shost);
	enum snic_state sv_state;
	unsigned long flags;
	int ret = FAILED;

	/* Remember the current state so it can be restored on failure. */
	sv_state = snic_get_state(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic_get_state(snic) == SNIC_FWRESET) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");

		/* Give the in-progress reset time to finish. */
		msleep(SNIC_HOST_RESET_TIMEOUT);
		ret = SUCCESS;

		goto reset_end;
	}

	snic_set_state(snic, SNIC_FWRESET);
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * Wait for I/Os that already entered queuecommand to drain.
	 * Note: with the task state left at TASK_RUNNING,
	 * schedule_timeout() does not sleep, so this loop effectively
	 * busy-polls until ios_inflight drops to zero.
	 */
	while (atomic_read(&snic->ios_inflight))
		schedule_timeout(msecs_to_jiffies(1));

	ret = snic_issue_hba_reset(snic, sc);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "reset:Host Reset Failed w/ err %d.\n",
			      ret);
		spin_lock_irqsave(&snic->snic_lock, flags);
		snic_set_state(snic, sv_state);
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
		ret = FAILED;

		goto reset_end;
	}

	ret = SUCCESS;

reset_end:
	return ret;
}
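
/*
 * A minimal sketch (not driver code) of an equivalent drain loop that
 * actually sleeps between polls instead of busy-polling as above,
 * assuming this context may sleep (it already uses msleep()):
 *
 *	while (atomic_read(&snic->ios_inflight))
 *		usleep_range(1000, 2000);
 */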

/*
 * SCSI error handling invokes the host-reset handler after the lesser
 * recovery levels (abort, device reset) have failed.
 */
int
snic_host_reset(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	unsigned long start_time = jiffies;
	int ret;

	SNIC_SCSI_DBG(shost,
		      "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
		      snic_cmd_tag(sc), CMD_FLAGS(sc));

	ret = snic_reset(shost, sc);

	SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time),
		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	return ret;
}
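
/*
 * A sketch (assumed wiring, defined elsewhere in the driver) of how the
 * two error-handler entry points above are exposed to the SCSI midlayer
 * through the host template:
 *
 *	static struct scsi_host_template snic_host_template = {
 *		...
 *		.eh_device_reset_handler = snic_device_reset,
 *		.eh_host_reset_handler	 = snic_host_reset,
 *		...
 *	};
 */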

/*
 * snic_cmpl_pending_tmreq : Completes a pending task-management request
 * during cleanup, waking any thread waiting on it.
 */
static void
snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;

	SNIC_SCSI_DBG(snic->shost,
		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));

	/*
	 * Firmware will not post a completion for this TM request once
	 * the HBA is being cleaned up, so mark the abort successful here
	 * and wake whichever thread is waiting on it.
	 */
	CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;

	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		return;

	if (rqi->dr_done)
		complete(rqi->dr_done);
	else if (rqi->abts_done)
		complete(rqi->abts_done);
}
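
/*
 * For reference, a minimal sketch (not driver code, timeout constant
 * assumed) of the waiter side of the dr_done/abts_done handshake
 * completed above, following the pattern used elsewhere in this file:
 *
 *	DECLARE_COMPLETION_ONSTACK(tm_done);
 *
 *	spin_lock_irqsave(io_lock, flags);
 *	rqi->dr_done = &tm_done;
 *	spin_unlock_irqrestore(io_lock, flags);
 *	... queue the TM request to firmware ...
 *	wait_for_completion_timeout(&tm_done, SNIC_TM_TIMEOUT);
 */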

/*
 * snic_scsi_cleanup : Walks the host tag map and releases every outstanding
 * request except the one with ex_tag, completing the corresponding commands
 * with DID_TRANSPORT_DISRUPTED.
 */
static void
snic_scsi_cleanup(struct snic *snic, int ex_tag)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int tag;
	u64 st_time = 0;

	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		/* Skip the command that initiated the cleanup. */
		if (tag == ex_tag)
			continue;

		io_lock = snic_io_lock_tag(snic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		if (unlikely(snic_tmreq_pending(sc))) {
			/*
			 * A pending TM request has a waiter; wake it up
			 * instead of releasing the request underneath it.
			 */
			snic_cmpl_pending_tmreq(snic, sc);
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			goto cleanup;
		}

		SNIC_SCSI_DBG(snic->shost,
			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
			      sc, rqi, tag, CMD_FLAGS(sc));

		CMD_SP(sc) = NULL;
		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
		spin_unlock_irqrestore(io_lock, flags);
		st_time = rqi->start_time;

		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
			       rqi, CMD_FLAGS(sc));

		snic_release_req_buf(snic, rqi, sc);

cleanup:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		SNIC_HOST_INFO(snic->shost,
			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
			       sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
			       jiffies_to_msecs(jiffies - st_time));

		/* Update I/O completion stats and trace before completing. */
		snic_stats_update_io_cmpl(&snic->s_stats);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
			 jiffies_to_msecs(jiffies - st_time), 0,
			 SNIC_TRC_CMD(sc),
			 SNIC_TRC_CMD_STATE_FLAGS(sc));

		scsi_done(sc);
	}
}

void
snic_shutdown_scsi_cleanup(struct snic *snic)
{
	SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");

	snic_scsi_cleanup(snic, SCSI_NO_TAG);
}
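
/*
 * Usage note (a sketch, assuming the driver's teardown flow): the PCI
 * remove/shutdown path is expected to call snic_shutdown_scsi_cleanup()
 * after quiescing the host, so every outstanding command is failed back
 * to the midlayer before the adapter goes away, e.g.
 *
 *	snic_shutdown_scsi_cleanup(snic);
 *	scsi_remove_host(snic->shost);
 */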

/*
 * snic_internal_abort_io : Issues a driver-internal abort for one
 * outstanding command. Returns SUCCESS when the abort was queued to
 * firmware, 0 when there was nothing to abort, and a negative error when
 * queuing the abort failed.
 */
static int
snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 sv_state = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi)
		goto skip_internal_abts;

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_internal_abts;

	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "internal_abts: dev rst not pending sc 0x%p\n",
			      sc);

		goto skip_internal_abts;
	}

	/* Abort only I/Os that have actually been issued to firmware. */
	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
		SNIC_SCSI_DBG(snic->shost,
			"internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
			sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));

		goto skip_internal_abts;
	}

	/* Save the state so it can be restored if queuing the abort fails. */
	sv_state = CMD_STATE(sc);
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		/* Tag the request as a device-reset TM request. */
		rqi->tm_tag = SNIC_TAG_DEV_RST;
		SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
	}

	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
		      snic_cmd_tag(sc));
	SNIC_BUG_ON(rqi->abts_done);
	spin_unlock_irqrestore(io_lock, flags);

	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
			      snic_cmd_tag(sc), ret);

		spin_lock_irqsave(io_lock, flags);

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		goto skip_internal_abts;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
	else
		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;

	ret = SUCCESS;

skip_internal_abts:
	lockdep_assert_held(io_lock);
	spin_unlock_irqrestore(io_lock, flags);

	return ret;
}

/*
 * snic_tgt_scsi_abort_io : Aborts all outstanding I/Os on a target. DAS
 * targets get a plain task abort; all others get a task termination.
 */
int
snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
{
	struct snic *snic = NULL;
	struct scsi_cmnd *sc = NULL;
	struct snic_tgt *sc_tgt = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag, abt_cnt = 0, tmf = 0;

	if (!tgt)
		return -1;

	snic = shost_priv(snic_tgt_to_shost(tgt));
	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");

	if (tgt->tdata.typ == SNIC_TGT_DAS)
		tmf = SNIC_ITMF_ABTS_TASK;
	else
		tmf = SNIC_ITMF_ABTS_TASK_TERM;

	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		/* Only abort commands that belong to this target. */
		sc_tgt = starget_to_tgt(scsi_target(sc->device));
		if (sc_tgt != tgt) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}
		spin_unlock_irqrestore(io_lock, flags);

		ret = snic_internal_abort_io(snic, sc, tmf);
		if (ret < 0) {
			SNIC_HOST_ERR(snic->shost,
				      "tgt_abt_io: Tag %x, Failed w err = %d\n",
				      tag, ret);

			continue;
		}

		if (ret == SUCCESS)
			abt_cnt++;
	}

	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);

	return 0;
}
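
/*
 * Usage note (a sketch, assuming the discovery code's teardown flow): when
 * a target disappears, its removal path is expected to call
 * snic_tgt_scsi_abort_io(tgt) before tearing the target down, so firmware
 * stops referencing the outstanding requests first, e.g.
 *
 *	if (snic_tgt_scsi_abort_io(tgt))
 *		SNIC_HOST_ERR(snic->shost, "tgt abort io failed\n");
 */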