/*
 * csio_scsi.c: SCSI command handling, DDP setup and error recovery
 * for the Chelsio FCoE (csiostor) driver.
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_scsi.h"
#include "csio_init.h"

int csio_scsi_eqsize = 65536;
int csio_scsi_iqlen = 128;
int csio_scsi_ioreqs = 2048;
uint32_t csio_max_scan_tmo;
uint32_t csio_delta_scan_tmo = 5;
int csio_lun_qdepth = 32;

static int csio_ddp_descs = 128;

static int csio_do_abrt_cls(struct csio_hw *,
			    struct csio_ioreq *, bool);

static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
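
/*
 * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
 * @ioreq: The I/O request.
 * @sld: Level information: LUN, rnode, lnode or all.
 *
 * Returns true if @ioreq falls within the given level. Should be
 * called with the HW lock held.
 */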
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}

/*
 * csio_scsi_gather_active_ios - Gather matching active I/Os.
 * @scm: SCSI module.
 * @sld: Level information.
 * @dest: The queue where the matching I/Os are to be gathered.
 *
 * Should be called with the HW lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

/*
 * csio_scsi_itnexus_loss_error - Is this an I-T nexus loss error?
 * @error: Error reported by the firmware.
 *
 * Returns true for errors that indicate a lost I-T nexus: link down,
 * or the remote device being lost or logged out.
 */
static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return true;
	}
	return false;
}
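
/*
 * csio_scsi_fcp_cmnd - Frame the FCP_CMND payload of a request.
 * @req: I/O request.
 * @addr: DMA-visible location to place the FCP_CMND IU.
 */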
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management */
	if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		} else {
			fcp_cmnd->fc_flags = 0;
		}
	} else {
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
	}
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD work request.
 * @req: I/O request.
 * @addr: DMA-visible location to place the WR.
 * @size: Size of the WR (including the FCP_CMND payload).
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_CMD_WR) |
				     FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* DMA buffer used to receive the FCP_RSP IU */
	dma_buf = &req->dma_buf;

	/* Response SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* The FCP_CMND IU immediately follows the WR header */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
					 sizeof(struct fw_scsi_cmd_wr)));
}

#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +				\
	 ALIGN((_imm), 16))

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
	(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
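
/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: I/O request.
 *
 * Gets a WR slot from the egress queue and initializes it. If the
 * slot wraps around the queue, the WR is built in a scratch buffer
 * and copied out in two pieces. Sets req->drv_status on failure to
 * get a slot.
 */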
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * The WR wraps around the EQ: initialize it in the
		 * scratch buffer, then copy it out in two pieces.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}
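
/*
 * csio_scsi_init_ultptx_dsgl - Initialize the ULPTX DSGL.
 * @hw: HW module.
 * @req: I/O request.
 * @sgl: ULPTX SGL to fill in.
 *
 * Fills the DSGL either from the command's DMA-mapped scatterlist or,
 * for driver-copied (dcopy) requests, from the DDP buffers queued on
 * req->gen_list.
 */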
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F |
			      ULPTX_NSGE_V(req->nsge));

	if (likely(!req->dcopy)) {
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program SG elements with the driver's DDP buffers */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}
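
/*
 * csio_scsi_init_read_wr - Initialize the READ work request.
 * @req: I/O request.
 * @wrp: DMA-visible location to place the WR.
 * @size: Size of the WR.
 */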
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));

	/* DMA buffer used to receive the FCP_RSP IU */
	dma_buf = &req->dma_buf;

	/* Response SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past the command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
	      sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE work request.
 * @req: I/O request.
 * @wrp: DMA-visible location to place the WR.
 * @size: Size of the WR.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));

	/* DMA buffer used to receive the FCP_RSP IU */
	dma_buf = &req->dma_buf;

	/* Response SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					 sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past the command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
	      sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate the WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				\
do {									\
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +			\
	       ALIGN((imm), 16) +					\
	       sizeof(struct ulptx_sgl);				\
									\
	if (unlikely((req)->nsge > 1))					\
		(sz) += (sizeof(struct ulptx_sge_pair) *		\
				(ALIGN(((req)->nsge - 1), 2) / 2));	\
									\
} while (0)

/*
 * csio_scsi_read - Create a READ WR.
 * @req: I/O request.
 *
 * Gets a WR slot from the egress queue, initializes the READ WR and
 * handles the queue-wrap case. Sets req->drv_status on failure.
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

			/* WR wraps around the EQ: build and copy in two pieces */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a WRITE WR.
 * @req: I/O request.
 *
 * Gets a WR slot from the egress queue, initializes the WRITE WR and
 * handles the queue-wrap case. Sets req->drv_status on failure.
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

			/* WR wraps around the EQ: build and copy in two pieces */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}
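
/*
 * csio_setup_ddp - Set up direct data placement for a read request.
 * @scsim: SCSI module.
 * @req: I/O request.
 *
 * If every SGE is page-aligned as DDP requires, the read is issued
 * directly. Otherwise driver-owned DDP buffers are substituted
 * (req->dcopy = 1) and the data is copied back to the command's SGL
 * on completion.
 */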
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len = sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* All SGEs except the first must start on a page boundary */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* All SGEs except the last must end on a page boundary */
		if ((i != (req->nsge - 1)) &&
		    ((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 "(%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGL is suitable for DDP: issue the read directly */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * Fall back to driver-copy mode: read into driver-owned DDP
	 * buffers and copy back to the SGL on completion.
	 */
	req->dcopy = 1;

	/* Use gen_list to hold the DDP buffers used by this request */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate DDP buffers covering the transfer length */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;

		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* Issue the read with the DDP buffers as the SGL */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* Return the allocated DDP buffers on failure */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}
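
/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE work request.
 * @req: I/O request to be aborted/closed.
 * @addr: DMA-visible location to place the WR.
 * @size: Size of the WR.
 * @abort: SCSI_ABORT to abort, SCSI_CLOSE to close.
 */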
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(rn->flowid) |
				       FW_WR_LEN16_V(DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;

	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;

	wr->t_cookie = (uintptr_t) req;
}

static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * The WR wraps around the EQ: initialize it in the
		 * scratch buffer, then copy it out in two pieces.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}
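
/*
 * SCSI I/O state machine.
 *
 * An ioreq starts in csio_scsis_uninit, moves to io_active (or
 * tm_active for task management) once its WR is issued, and returns
 * to uninit on completion. Aborts and closes route through the
 * aborting/closing states; shost_cmpl_await holds completions that
 * must wait for an I-T nexus loss to be reported to the midlayer.
 */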
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:
		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* Change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: The I/Os affected by this TM request are
			 * gathered before the TM is issued, not after: this
			 * avoids aborting I/Os that get issued after the TM
			 * request but before the TM response.
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * An abort/close request can arrive here via the error
		 * handler after the I/O has already completed and returned
		 * to the uninit state. Fail the request so the caller
		 * treats the I/O as done.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSI-X mode with multiple queues, SCSI completions can
		 * reach us before the FW events that report the I-T nexus
		 * loss (link down, remote logout etc.). Such completions
		 * are held on the rnode's host_cmpl_q and returned to the
		 * upper layer only after the nexus loss has been reported,
		 * keeping the two serialized.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * The FW says the remote device is gone, but the
			 * rnode is still ready: the nexus-loss event has not
			 * been processed yet, so hold the completion.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
					       csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to tell the ABORTED event that the
		 * original I/O was returned to the driver by the FW.
		 * Once in aborting state, the success or failure of the
		 * original I/O no longer matters to us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check whether the original I/O WR completed before the
		 * abort completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				  " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Possible scenarios:
		 * 1. The abort completed successfully: FW returned
		 *    FW_SUCCESS.
		 * 2. The I/O completion and the abort crossed each other:
		 *    FW returned FW_EINVAL for the abort.
		 * 3. FW could not send the abort out on the wire due to an
		 *    I-T nexus loss, and returned the corresponding
		 *    nexus-loss status.
		 * 4. The abort timed out (remote device did not respond):
		 *    FW returned FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW could not abort the request for some other reason
		 *    and returned an error.
		 *
		 * The first three count as successful aborts, the last two
		 * as failed attempts. Fold the successful cases into
		 * FW_SCSI_ABORT_REQUESTED so the host can convey the
		 * result to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * A close can arrive for an I/O whose abort is still
		 * outstanding (e.g. the rnode went away in the meantime).
		 * Ignore it; the abort path will finish the cleanup.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to tell the CLOSED event that the
		 * original I/O was returned to the driver by the FW.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check whether the original I/O WR completed before the
		 * close completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either the close succeeded, or the close and the I/O
		 * completion crossed each other (FW_EINVAL). Any other
		 * status is unexpected.
		 */
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
			       (req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Just succeed the abort/close request: the remote-device
		 * unregister path will return this I/O to the upper layer
		 * within a sane amount of time. A close can also arrive
		 * during a link down, after the FW has returned the I/O
		 * but before the nexus-loss event has been processed;
		 * treat it the same way.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}
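
/*
 * csio_scsi_cmpl_handler - WR completion handler for the SCSI IQ.
 * @hw: HW module.
 * @wr: The completed work request.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private data.
 * @scsiwr: Out parameter: pointer to the SCSI WR within the CPL.
 *
 * Looks up the ioreq that owns the completed WR via the WR cookie,
 * records the WR status in it and returns it. Returns NULL for
 * invalid CPL messages or unknown WR opcodes.
 */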
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
		       struct csio_fl_dma_buf *flb, void *priv,
		       uint8_t **scsiwr)
{
	struct csio_ioreq *ioreq = NULL;
	struct cpl_fw6_msg *cpl;
	uint8_t *tempwr;
	uint8_t status;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	/* The CPL message follows the 8-byte WR header */
	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
			  cpl->opcode);
		CSIO_INC_STATS(scm, n_inval_cplop);
		return NULL;
	}

	tempwr = (uint8_t *)(cpl->data);
	status = csio_wr_status(tempwr);
	*scsiwr = tempwr;

	if (likely((*tempwr == FW_SCSI_READ_WR) ||
		   (*tempwr == FW_SCSI_WRITE_WR) ||
		   (*tempwr == FW_SCSI_CMD_WR))) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
				(((struct fw_scsi_read_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;

		return ioreq;
	}

	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;
		return ioreq;
	}

	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
	CSIO_INC_STATS(scm, n_inval_scsiop);
	return NULL;
}

/*
 * csio_scsi_cleanup_io_q - Cleanup I/Os on a given queue.
 * @scm: SCSI module.
 * @q: Queue of gathered ioreqs.
 *
 * Calls the I/O completion callback with the HW lock dropped, then
 * returns each ioreq to the freelist. Should be called with the HW
 * lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
	struct csio_hw *hw = scm->hw;
	struct csio_ioreq *ioreq;
	struct list_head *tmp, *next;
	struct scsi_cmnd *scmnd;

	/* Call back the completion routines */
	list_for_each_safe(tmp, next, q) {
		ioreq = (struct csio_ioreq *)tmp;
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		scmnd = csio_scsi_cmnd(ioreq);
		spin_unlock_irq(&hw->lock);

		/*
		 * Upper layers may have cleared this command; this check
		 * avoids accessing a stale reference.
		 */
		if (scmnd != NULL)
			ioreq->io_cbfn(hw, ioreq);

		spin_lock_irq(&scm->freelist_lock);
		csio_put_scsi_ioreq(scm, ioreq);
		spin_unlock_irq(&scm->freelist_lock);

		spin_lock_irq(&hw->lock);
	}
}

#define CSIO_SCSI_ABORT_Q_POLL_MS	2000

static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	int ready = 0;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int rv;

	if (csio_scsi_cmnd(ioreq) != scmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return;
	}

	ready = csio_is_lnode_ready(ln);

	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	if (rv != 0) {
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);
	}
}
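
/*
 * csio_scsi_abort_io_q - Abort all I/Os on the given queue.
 * @scm: SCSI module.
 * @q: Queue of ioreqs to abort.
 * @tmo: Timeout in milliseconds.
 *
 * Issues aborts (or closes, if the lnode is not ready) for every
 * ioreq on @q, then polls until @q drains or @tmo expires. Returns 0
 * on success, -ETIMEDOUT otherwise. Should be called with the HW
 * lock held; the lock is dropped while polling.
 */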
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *tmp, *next;
	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct scsi_cmnd *scmnd;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Now abort/close I/Os in the queue */
	list_for_each_safe(tmp, next, q) {
		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
	}

	/* Wait till all active I/Os are completed/aborted/closed */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* All I/Os completed */
	if (list_empty(q))
		return 0;

	return -ETIMEDOUT;
}

/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in the SCSI module.
 * @scm: SCSI module.
 * @abort: Abort the I/Os if they don't complete in a reasonable time.
 *
 * Returns 0 when the active queue drains, or the error from the
 * abort attempt. Should be called with the HW lock held.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
	struct csio_hw *hw = scm->hw;
	int rv = 0;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* All I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;

	/* Else abort the remaining I/Os, giving them 30s to complete */
	if (abort) {
		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
		if (rv == 0)
			return rv;
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
	}

	csio_scsi_cleanup_io_q(scm, &scm->active_q);

	CSIO_DB_ASSERT(list_empty(&scm->active_q));

	return rv;
}
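
/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os on an lnode.
 * @scm: SCSI module.
 * @ln: lnode whose I/Os are to be cleaned up.
 *
 * Returns 0 when the lnode's completion queue drains, or the error
 * from the abort attempt. Should be called with the HW lock held.
 */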
int
csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
{
	struct csio_hw *hw = scm->hw;
	struct csio_scsi_level_data sld;
	int rv;
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);

	sld.level = CSIO_LEV_LNODE;
	sld.lnode = ln;
	INIT_LIST_HEAD(&ln->cmpl_q);
	csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);

	/* No I/Os pending on this lnode */
	if (list_empty(&ln->cmpl_q))
		return 0;

	/* Wait until all active I/Os on this lnode are completed */
	while (!list_empty(&ln->cmpl_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* All I/Os completed */
	if (list_empty(&ln->cmpl_q))
		return 0;

	csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);

	/* Abort the remaining I/Os, giving them 30s to complete */
	rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
	if (rv != 0) {
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
		csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
	}

	CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));

	return rv;
}

static ssize_t
csio_show_hw_state(struct device *dev,
		   struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (csio_is_hw_ready(hw))
		return snprintf(buf, PAGE_SIZE, "ready\n");
	else
		return snprintf(buf, PAGE_SIZE, "not ready\n");
}

/* Device reset */
static ssize_t
csio_device_reset(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (*buf != '1')
		return -EINVAL;

	/* Delete NPIV lnodes */
	csio_lnodes_exit(hw, 1);

	/* Block upper I/Os */
	csio_lnodes_block_request(hw);

	spin_lock_irq(&hw->lock);
	csio_hw_reset(hw);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper I/Os */
	csio_lnodes_unblock_request(hw);
	return count;
}

/* Disable port */
static ssize_t
csio_disable_port(struct device *dev,
		  struct device_attribute *attr, const char *buf, size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	bool disable;

	if (*buf == '1' || *buf == '0')
		disable = (*buf == '1');
	else
		return -EINVAL;

	/* Block upper I/Os */
	csio_lnodes_block_by_port(hw, ln->portid);

	spin_lock_irq(&hw->lock);
	csio_disable_lnodes(hw, ln->portid, disable);
	spin_unlock_irq(&hw->lock);

	/* Unblock upper I/Os */
	csio_lnodes_unblock_by_port(hw, ln->portid);
	return count;
}

/* Show debug level */
static ssize_t
csio_show_dbg_level(struct device *dev,
		    struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
}

/* Store debug level */
static ssize_t
csio_store_dbg_level(struct device *dev,
		     struct device_attribute *attr, const char *buf,
		     size_t count)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	uint32_t dbg_level = 0;

	if (!isdigit(buf[0]))
		return -EINVAL;

	if (sscanf(buf, "%i", &dbg_level) != 1)
		return -EINVAL;

	ln->params.log_level = dbg_level;
	hw->params.log_level = dbg_level;

	return count;
}

static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset);
static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
		   csio_store_dbg_level);

static struct attribute *csio_fcoe_lport_attrs[] = {
	&dev_attr_hw_state.attr,
	&dev_attr_device_reset.attr,
	&dev_attr_disable_port.attr,
	&dev_attr_dbg_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_lport);

static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct csio_lnode *ln = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
}

static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);

static struct attribute *csio_fcoe_vport_attrs[] = {
	&dev_attr_num_reg_rnodes.attr,
	&dev_attr_dbg_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(csio_fcoe_vport);

static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct scatterlist *sg;
	uint32_t bytes_left;
	uint32_t bytes_copy;
	uint32_t buf_off = 0;
	uint32_t start_off = 0;
	uint32_t sg_off = 0;
	void *sg_addr;
	void *buf_addr;
	struct csio_dma_buf *dma_buf;

	bytes_left = scsi_bufflen(scmnd);
	sg = scsi_sglist(scmnd);
	dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);

	/* Copy data from the driver's DDP buffers to the command's SGL */
	while (bytes_left > 0 && sg && dma_buf) {
		if (buf_off >= dma_buf->len) {
			buf_off = 0;
			dma_buf = (struct csio_dma_buf *)
					csio_list_next(dma_buf);
			continue;
		}

		if (start_off >= sg->length) {
			start_off -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		buf_addr = dma_buf->vaddr + buf_off;
		sg_off = sg->offset + start_off;
		bytes_copy = min((dma_buf->len - buf_off),
				 sg->length - start_off);
		bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
				 bytes_copy);

		sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
		if (!sg_addr) {
			csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
				 sg, req);
			break;
		}

		csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
			 sg_addr, sg_off, buf_addr, bytes_copy);
		memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
		kunmap_atomic(sg_addr);

		start_off += bytes_copy;
		buf_off += bytes_copy;
		bytes_left -= bytes_copy;
	}

	if (bytes_left > 0)
		return DID_ERROR;
	else
		return DID_OK;
}
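
/*
 * csio_scsi_err_handler - Translate a non-successful FW completion
 * into a SCSI midlayer result and complete the command.
 * @hw: HW module.
 * @req: I/O request.
 */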
static inline void
csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;
	struct csio_dma_buf *dma_buf;
	uint8_t flags, scsi_status = 0;
	uint32_t host_status = DID_OK;
	uint32_t rsp_len = 0, sns_len = 0;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

	switch (req->wr_status) {
	case FW_HOSTERROR:
		if (unlikely(!csio_is_hw_ready(hw)))
			return;

		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_hosterror);

		break;
	case FW_SCSI_RSP_ERR:
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
		flags = fcp_resp->resp.fr_flags;
		scsi_status = fcp_resp->resp.fr_status;

		if (flags & FCP_RSP_LEN_VAL) {
			rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
			if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
			    (rsp_info->rsp_code != FCP_TMF_CMPL)) {
				host_status = DID_ERROR;
				goto out;
			}
		}

		if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
			sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;

			memcpy(cmnd->sense_buffer,
			       &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
			CSIO_INC_STATS(scm, n_autosense);
		}

		scsi_set_resid(cmnd, 0);

		/* Under run */
		if (flags & FCP_RESID_UNDER) {
			scsi_set_resid(cmnd,
				       be32_to_cpu(fcp_resp->ext.fr_resid));

			if (!(flags & FCP_SNS_LEN_VAL) &&
			    (scsi_status == SAM_STAT_GOOD) &&
			    ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
			     < cmnd->underflow))
				host_status = DID_ERROR;
		} else if (flags & FCP_RESID_OVER)
			host_status = DID_ERROR;

		CSIO_INC_STATS(scm, n_rsperror);
		break;

	case FW_SCSI_OVER_FLOW_ERR:
		csio_warn(hw,
			  "Over-flow error,cmnd:0x%x expected len:0x%x"
			  " resid:0x%x\n", cmnd->cmnd[0],
			  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_ovflerror);
		break;

	case FW_SCSI_UNDER_FLOW_ERR:
		csio_warn(hw,
			  "Under-flow error,cmnd:0x%x expected"
			  " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
			  cmnd->cmnd[0], scsi_bufflen(cmnd),
			  scsi_get_resid(cmnd), cmnd->device->lun,
			  rn->flowid);
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_unflerror);
		break;

	case FW_SCSI_ABORT_REQUESTED:
	case FW_SCSI_ABORTED:
	case FW_SCSI_CLOSE_REQUESTED:
		csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
			 cmnd->cmnd[0],
			 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
			 "closed" : "aborted");
		/*
		 * csio_eh_abort_handler checks this value to succeed or
		 * fail the abort request.
		 */
		host_status = DID_REQUEUE;
		if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
			CSIO_INC_STATS(scm, n_closed);
		else
			CSIO_INC_STATS(scm, n_aborted);
		break;

	case FW_SCSI_ABORT_TIMEDOUT:
		/* FW timed out the abort itself */
		csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
			 req, cmnd, req->wr_status);
		host_status = DID_ERROR;
		CSIO_INC_STATS(scm, n_abrt_timedout);
		break;

	case FW_RDEV_NOT_READY:
		/*
		 * In firmware, an RDEV can be in this state temporarily
		 * before moving into the disappeared/lost state. Complete
		 * the request as if the device had disappeared.
		 */
		CSIO_INC_STATS(scm, n_rdev_nr_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_LOST:
		CSIO_INC_STATS(scm, n_rdev_lost_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_LOGO:
		CSIO_INC_STATS(scm, n_rdev_logo_error);
		host_status = DID_ERROR;
		break;

	case FW_ERR_RDEV_IMPL_LOGO:
		host_status = DID_ERROR;
		break;

	case FW_ERR_LINK_DOWN:
		CSIO_INC_STATS(scm, n_link_down_error);
		host_status = DID_ERROR;
		break;

	case FW_FCOE_NO_XCHG:
		CSIO_INC_STATS(scm, n_no_xchg_error);
		host_status = DID_ERROR;
		break;

	default:
		csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
			 req->wr_status, req, cmnd);
		CSIO_DB_ASSERT(0);

		CSIO_INC_STATS(scm, n_unknown_error);
		host_status = DID_ERROR;
		break;
	}

out:
	if (req->nsge > 0) {
		scsi_dma_unmap(cmnd);
		if (req->dcopy && (host_status == DID_OK))
			host_status = csio_scsi_copy_to_sgl(hw, req);
	}

	cmnd->result = (((host_status) << 16) | scsi_status);
	scsi_done(cmnd);

	/* Wake up waiting threads */
	csio_scsi_cmnd(req) = NULL;
	complete(&req->cmplobj);
}

/*
 * csio_scsi_cbfn - SCSI I/O completion callback.
 * @hw: HW module.
 * @req: I/O request.
 *
 * On success, unmaps DMA, copies back driver DDP data if needed and
 * completes the command; otherwise hands off to the error handler.
 */
static void
csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	uint8_t scsi_status = SAM_STAT_GOOD;
	uint32_t host_status = DID_OK;

	if (likely(req->wr_status == FW_SUCCESS)) {
		if (req->nsge > 0) {
			scsi_dma_unmap(cmnd);
			if (req->dcopy)
				host_status = csio_scsi_copy_to_sgl(hw, req);
		}

		cmnd->result = (((host_status) << 16) | scsi_status);
		scsi_done(cmnd);
		csio_scsi_cmnd(req) = NULL;
		CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
	} else {
		/* Error handling */
		csio_scsi_err_handler(hw, req);
	}
}
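
/*
 * csio_queuecommand - SCSI midlayer entry point to issue an I/O.
 * @host: The scsi_host pointer.
 * @cmnd: The SCSI command.
 *
 * Maps the command for DMA, allocates an ioreq and kicks off the
 * SCSI I/O state machine. Returns 0 on success, or
 * SCSI_MLQUEUE_HOST_BUSY when out of resources so the midlayer
 * retries the command later.
 */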
static int
csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	unsigned long flags;
	int nsge = 0;
	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
	int retval;
	struct csio_scsi_qset *sqset;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];

	nr = fc_remote_port_chkready(rport);
	if (nr) {
		cmnd->result = nr;
		CSIO_INC_STATS(scsim, n_rn_nr_error);
		goto err_done;
	}

	if (unlikely(!csio_is_hw_ready(hw))) {
		cmnd->result = (DID_REQUEUE << 16);
		CSIO_INC_STATS(scsim, n_hw_nr_error);
		goto err_done;
	}

	/* Get req->nsge, if there are SG elements to be mapped */
	nsge = scsi_dma_map(cmnd);
	if (unlikely(nsge < 0)) {
		CSIO_INC_STATS(scsim, n_dmamap_error);
		goto err;
	}

	/* Do we support so many mappings? */
	if (unlikely(nsge > scsim->max_sge)) {
		csio_warn(hw,
			  "More SGEs than can be supported."
			  " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
		CSIO_INC_STATS(scsim, n_unsupp_sge_error);
		goto err_dma_unmap;
	}

	/* Get a free ioreq structure - SM is already set to uninit */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
	if (!ioreq) {
		csio_err(hw, "Out of I/O request elements. Active #:%d\n",
			 scsim->stats.n_active);
		CSIO_INC_STATS(scsim, n_no_req_error);
		goto err_dma_unmap;
	}

	ioreq->nsge = nsge;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;
	ioreq->wr_status = 0;
	ioreq->drv_status = 0;
	csio_scsi_cmnd(ioreq) = (void *)cmnd;
	ioreq->tmo = 0;
	ioreq->datadir = cmnd->sc_data_direction;

	if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
		CSIO_INC_STATS(ln, n_output_requests);
		ln->stats.n_output_bytes += scsi_bufflen(cmnd);
	} else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
		CSIO_INC_STATS(ln, n_input_requests);
		ln->stats.n_input_bytes += scsi_bufflen(cmnd);
	} else
		CSIO_INC_STATS(ln, n_control_requests);

	/* Set the completion callback */
	ioreq->io_cbfn = csio_scsi_cbfn;

	/* Needed during abort */
	cmnd->host_scribble = (unsigned char *)ioreq;
	csio_priv(cmnd)->fc_tm_flags = 0;

	/* Kick off the SCSI I/O state machine on this ioreq */
	spin_lock_irqsave(&hw->lock, flags);
	retval = csio_scsi_start_io(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
			 ioreq, retval);
		CSIO_INC_STATS(scsim, n_busy_error);
		goto err_put_req;
	}

	return 0;

err_put_req:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
err_dma_unmap:
	if (nsge > 0)
		scsi_dma_unmap(cmnd);
err:
	return rv;

err_done:
	scsi_done(cmnd);
	return 0;
}

static int
csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
{
	int rv;
	int cpu = smp_processor_id();
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];

	ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
	/*
	 * Use the current processor's egress queue to post the
	 * abort/close, but retain the ingress queue ID of the original
	 * I/O so the abort/close completion arrives on the same queue
	 * as the original I/O's completion.
	 */
	ioreq->eq_idx = sqset->eq_idx;

	if (abort == SCSI_ABORT)
		rv = csio_scsi_abort(ioreq);
	else
		rv = csio_scsi_close(ioreq);

	return rv;
}

static int
csio_eh_abort_handler(struct scsi_cmnd *cmnd)
{
	struct csio_ioreq *ioreq;
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int ready = 0, ret;
	unsigned long tmo = 0;
	int rv;
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	ioreq = (struct csio_ioreq *)cmnd->host_scribble;
	if (!ioreq)
		return SUCCESS;

	if (!rn)
		return FAILED;

	csio_dbg(hw,
		 "Request to abort ioreq:%p cmd:%p cdb:%08llx"
		 " ssni:0x%x lun:%llu iq:0x%x\n",
		 ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
		 cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));

	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return SUCCESS;
	}

	ready = csio_is_lnode_ready(ln);
	tmo = CSIO_SCSI_ABRT_TMO_MS;

	reinit_completion(&ioreq->cmplobj);
	spin_lock_irq(&hw->lock);
	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	spin_unlock_irq(&hw->lock);

	if (rv != 0) {
		if (rv == -EINVAL) {
			/*
			 * Return success if the abort/close was issued
			 * against an I/O that had already completed.
			 */
			return SUCCESS;
		}
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);

		goto inval_scmnd;
	}

	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));

	/* FW didn't respond to the abort within the timeout */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {

		csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
		CSIO_INC_STATS(scsim, n_abrt_timedout);

inval_scmnd:
		if (ioreq->nsge > 0)
			scsi_dma_unmap(cmnd);

		spin_lock_irq(&hw->lock);
		csio_scsi_cmnd(ioreq) = NULL;
		spin_unlock_irq(&hw->lock);

		cmnd->result = (DID_ERROR << 16);
		scsi_done(cmnd);

		return FAILED;
	}

	/* FW successfully aborted the request */
	if (host_byte(cmnd->result) == DID_REQUEUE) {
		csio_info(hw,
			  "Aborted SCSI command to (%d:%llu) tag %u\n",
			  cmnd->device->id, cmnd->device->lun,
			  scsi_cmd_to_rq(cmnd)->tag);
		return SUCCESS;
	} else {
		csio_info(hw,
			  "Failed to abort SCSI command, (%d:%llu) tag %u\n",
			  cmnd->device->id, cmnd->device->lun,
			  scsi_cmd_to_rq(cmnd)->tag);
		return FAILED;
	}
}
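
/*
 * csio_tm_cbfn - Task-management completion callback.
 * @hw: HW module.
 * @req: Completed TM ioreq.
 *
 * Caches the FW status in the command's private data and decodes the
 * FCP_RSP to recognize a successful TMF completion.
 */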
static void
csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
	struct csio_dma_buf *dma_buf;
	uint8_t flags = 0;
	struct fcp_resp_with_ext *fcp_resp;
	struct fcp_resp_rsp_info *rsp_info;

	csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
		 req, req->wr_status);

	/* Cache FW return status */
	csio_priv(cmnd)->wr_status = req->wr_status;

	/*
	 * FCP-4 says FCP_RSP_LEN_VAL shall be set in the flags for TM
	 * completions. If the target set the flag, a successful TM must
	 * carry rsp_code == FCP_TMF_CMPL; any other rsp_code, or a
	 * target that ignored the flag altogether, leaves the default
	 * error status (FW_SCSI_RSP_ERR) in place, i.e. the TM is
	 * treated as failed.
	 */
	if (req->wr_status == FW_SCSI_RSP_ERR) {
		dma_buf = &req->dma_buf;
		fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
		rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);

		flags = fcp_resp->resp.fr_flags;

		/* Modify return status if flags indicate success */
		if (flags & FCP_RSP_LEN_VAL)
			if (rsp_info->rsp_code == FCP_TMF_CMPL)
				csio_priv(cmnd)->wr_status = FW_SUCCESS;

		csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
	}

	/* Wake up the TM handler thread */
	csio_scsi_cmnd(req) = NULL;
}

static int
csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
{
	struct csio_lnode *ln = shost_priv(cmnd->device->host);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
	struct csio_ioreq *ioreq = NULL;
	struct csio_scsi_qset *sqset;
	unsigned long flags;
	int retval;
	int count, ret;
	LIST_HEAD(local_q);
	struct csio_scsi_level_data sld;

	if (!rn)
		goto fail;

	csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
		 cmnd->device->lun, rn->flowid, rn->scsi_id);

	if (!csio_is_lnode_ready(ln)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " local node vnpi:0x%x (LUN:%llu)\n",
			 ln->vnp_flowid, cmnd->device->lun);
		goto fail;
	}

	/* Lnode is ready, now wait on rport node readiness */
	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * If we blocked in the previous call, either the remote node has
	 * come back online or the device-loss timer has fired and the
	 * node is destroyed. A LUN reset is a TMF on the wire, so only
	 * allow it in the former case, where a valid session exists.
	 */
	if (fc_remote_port_chkready(rn->rport)) {
		csio_err(hw,
			 "LUN reset cannot be issued on non-ready"
			 " remote node ssni:0x%x (LUN:%llu)\n",
			 rn->flowid, cmnd->device->lun);
		goto fail;
	}

	/* Get a free ioreq structure */
	ioreq = csio_get_scsi_ioreq_lock(hw, scsim);

	if (!ioreq) {
		csio_err(hw, "Out of IO request elements. Active # :%d\n",
			 scsim->stats.n_active);
		goto fail;
	}

	sqset = &hw->sqset[ln->portid][smp_processor_id()];
	ioreq->nsge = 0;
	ioreq->lnode = ln;
	ioreq->rnode = rn;
	ioreq->iq_idx = sqset->iq_idx;
	ioreq->eq_idx = sqset->eq_idx;

	csio_scsi_cmnd(ioreq) = cmnd;
	cmnd->host_scribble = (unsigned char *)ioreq;
	csio_priv(cmnd)->wr_status = 0;

	csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
	ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;

	/*
	 * FW times the LUN reset for ioreq->tmo, so wait a little longer
	 * (10s more) than that to allow FW to return a timed-out command.
	 */
	count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);

	/* Set the completion callback */
	ioreq->io_cbfn = csio_tm_cbfn;

	/* Save off the ioreq info for later use */
	sld.level = CSIO_LEV_LUN;
	sld.lnode = ioreq->lnode;
	sld.rnode = ioreq->rnode;
	sld.oslun = cmnd->device->lun;

	spin_lock_irqsave(&hw->lock, flags);
	/* Kick off the TM state machine on this ioreq */
	retval = csio_scsi_start_tm(ioreq);
	spin_unlock_irqrestore(&hw->lock, flags);

	if (retval != 0) {
		csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
			 ioreq, retval);
		goto fail_ret_ioreq;
	}

	csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
		 count * (CSIO_SCSI_TM_POLL_MS / 1000));

	/* Wait for completion */
	while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
	       && count--)
		msleep(CSIO_SCSI_TM_POLL_MS);

	/* LUN reset timed out */
	if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
		csio_err(hw, "LUN reset (%d:%llu) timed out\n",
			 cmnd->device->id, cmnd->device->lun);

		spin_lock_irq(&hw->lock);
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		spin_unlock_irq(&hw->lock);

		goto fail_ret_ioreq;
	}

	/* LUN reset returned, check cached status */
	if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
		csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
			 cmnd->device->id, cmnd->device->lun,
			 csio_priv(cmnd)->wr_status);
		goto fail;
	}

	/*
	 * Task management is complete. Gather all the I/Os affected by
	 * the LUN reset and abort them.
	 */
	spin_lock_irq(&hw->lock);
	csio_scsi_gather_active_ios(scsim, &sld, &local_q);

	retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
	spin_unlock_irq(&hw->lock);

	/* If I/O aborts timed out, return these I/Os to the active_q */
	if (retval != 0) {
		csio_err(hw,
			 "Attempt to abort I/Os during LUN reset of %llu"
			 " returned %d\n", cmnd->device->lun, retval);

		spin_lock_irq(&hw->lock);
		list_splice_tail_init(&local_q, &scsim->active_q);
		spin_unlock_irq(&hw->lock);
		goto fail;
	}

	CSIO_INC_STATS(rn, n_lun_rst);

	csio_info(hw, "LUN reset occurred (%d:%llu)\n",
		  cmnd->device->id, cmnd->device->lun);

	return SUCCESS;

fail_ret_ioreq:
	csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
fail:
	CSIO_INC_STATS(rn, n_lun_rst_fail);
	return FAILED;
}

static int
csio_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));

	return 0;
}

static int
csio_slave_configure(struct scsi_device *sdev)
{
	scsi_change_queue_depth(sdev, csio_lun_qdepth);
	return 0;
}

static void
csio_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static int
csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct csio_lnode *ln = shost_priv(shost);
	int rv = 1;

	spin_lock_irq(shost->host_lock);
	if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
		goto out;

	rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
			    csio_delta_scan_tmo * HZ);
out:
	spin_unlock_irq(shost->host_lock);

	return rv;
}

struct scsi_host_template csio_fcoe_shost_template = {
	.module			= THIS_MODULE,
	.name			= CSIO_DRV_DESC,
	.proc_name		= KBUILD_MODNAME,
	.queuecommand		= csio_queuecommand,
	.cmd_size		= sizeof(struct csio_cmd_priv),
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc		= csio_slave_alloc,
	.slave_configure	= csio_slave_configure,
	.slave_destroy		= csio_slave_destroy,
	.scan_finished		= csio_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
	.shost_groups		= csio_fcoe_lport_groups,
	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
};

struct scsi_host_template csio_fcoe_shost_vport_template = {
	.module			= THIS_MODULE,
	.name			= CSIO_DRV_DESC,
	.proc_name		= KBUILD_MODNAME,
	.queuecommand		= csio_queuecommand,
	.cmd_size		= sizeof(struct csio_cmd_priv),
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= csio_eh_abort_handler,
	.eh_device_reset_handler = csio_eh_lun_reset_handler,
	.slave_alloc		= csio_slave_alloc,
	.slave_configure	= csio_slave_configure,
	.slave_destroy		= csio_slave_destroy,
	.scan_finished		= csio_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
	.shost_groups		= csio_fcoe_vport_groups,
	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
};
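
/*
 * csio_scsi_alloc_ddp_bufs - Allocate DDP buffers for the SCSI module.
 * @scm: SCSI module.
 * @hw: HW module.
 * @buf_size: Buffer size (rounded up to a multiple of the page size).
 * @num_buf: Number of buffers to allocate.
 *
 * Returns 0 on success; on failure, frees any partial allocations and
 * returns -ENOMEM.
 */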
static int
csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
			 int buf_size, int num_buf)
{
	int n = 0;
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc = NULL;
	uint32_t unit_size = 0;

	if (!num_buf)
		return 0;

	if (!buf_size)
		return -EINVAL;

	INIT_LIST_HEAD(&scm->ddp_freelist);

	/* Align buf size to page size */
	buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;

	for (n = 0; n < num_buf; n++) {
		/* Allocate a DDP descriptor */
		unit_size = buf_size;
		ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
		if (!ddp_desc) {
			csio_err(hw,
				 "Failed to allocate ddp descriptors,"
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ddp);
			goto no_mem;
		}

		/* Allocate the coherent DMA buffer itself */
		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
						     &ddp_desc->paddr,
						     GFP_KERNEL);
		if (!ddp_desc->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer (ddp) allocation"
				 " failed!\n");
			kfree(ddp_desc);
			goto no_mem;
		}

		ddp_desc->len = unit_size;

		/* Add it to the SCSI DDP freelist */
		list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
		CSIO_INC_STATS(scm, n_free_ddp);
	}

	return 0;
no_mem:
	/* Release the partially allocated DDP buffers */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;

	return -ENOMEM;
}

/*
 * csio_scsi_free_ddp_bufs - Free the DDP buffers of the SCSI module.
 * @scm: SCSI module.
 * @hw: HW module.
 */
static void
csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
{
	struct list_head *tmp;
	struct csio_dma_buf *ddp_desc;

	/* Release the DDP descriptors on the freelist */
	list_for_each(tmp, &scm->ddp_freelist) {
		ddp_desc = (struct csio_dma_buf *) tmp;
		tmp = csio_list_prev(tmp);
		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
				  ddp_desc->vaddr, ddp_desc->paddr);
		list_del_init(&ddp_desc->list);
		kfree(ddp_desc);
	}
	scm->stats.n_free_ddp = 0;
}
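
/*
 * csio_scsim_init - Initialize the SCSI module.
 * @scm: SCSI module.
 * @hw: HW module.
 *
 * Pre-allocates the ioreq freelist (each entry with a DMA-able FCP
 * response buffer) and the DDP buffer pool. Returns 0 on success,
 * -ENOMEM after releasing partial allocations.
 */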
int
csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
{
	int i;
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	INIT_LIST_HEAD(&scm->active_q);
	scm->hw = hw;

	scm->proto_cmd_len = sizeof(struct fcp_cmnd);
	scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
	scm->max_sge = CSIO_SCSI_MAX_SGE;

	spin_lock_init(&scm->freelist_lock);

	/* Pre-allocate ioreqs and initialize them */
	INIT_LIST_HEAD(&scm->ioreq_freelist);
	for (i = 0; i < csio_scsi_ioreqs; i++) {

		ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
		if (!ioreq) {
			csio_err(hw,
				 "I/O request element allocation failed, "
				 " Num allocated = %d.\n",
				 scm->stats.n_free_ioreq);

			goto free_ioreq;
		}

		/* Allocate a DMA buffer for the response payload */
		dma_buf = &ioreq->dma_buf;
		dma_buf->vaddr = dma_pool_alloc(hw->scsi_dma_pool, GFP_KERNEL,
						&dma_buf->paddr);
		if (!dma_buf->vaddr) {
			csio_err(hw,
				 "SCSI response DMA buffer allocation"
				 " failed!\n");
			kfree(ioreq);
			goto free_ioreq;
		}

		dma_buf->len = scm->proto_rsp_len;

		/* Set the state machine to uninit, add to the freelist */
		csio_init_state(&ioreq->sm, csio_scsis_uninit);
		INIT_LIST_HEAD(&ioreq->gen_list);
		init_completion(&ioreq->cmplobj);

		list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
		CSIO_INC_STATS(scm, n_free_ioreq);
	}

	if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
		goto free_ioreq;

	return 0;

free_ioreq:
	/*
	 * Free up existing allocations, since an error
	 * from here means we are returning for good.
	 */
	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		dma_pool_free(hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	return -ENOMEM;
}

/*
 * csio_scsim_exit - Uninitialize the SCSI module.
 * @scm: SCSI module.
 *
 * Frees the ioreq freelist and the DDP buffer pool.
 */
void
csio_scsim_exit(struct csio_scsim *scm)
{
	struct csio_ioreq *ioreq;
	struct csio_dma_buf *dma_buf;

	while (!list_empty(&scm->ioreq_freelist)) {
		struct csio_sm *tmp;

		tmp = list_first_entry(&scm->ioreq_freelist,
				       struct csio_sm, sm_list);
		list_del_init(&tmp->sm_list);
		ioreq = (struct csio_ioreq *)tmp;

		dma_buf = &ioreq->dma_buf;
		dma_pool_free(scm->hw->scsi_dma_pool, dma_buf->vaddr,
			      dma_buf->paddr);

		kfree(ioreq);
	}

	scm->stats.n_free_ioreq = 0;

	csio_scsi_free_ddp_bufs(scm, scm->hw);
}