0001
0002
0003
0004
0005
0006
0007 #include "efct_driver.h"
0008 #include "efct_unsol.h"
0009
/*
 * frame_printf() - emit a debug log line prefixed with the FC frame's
 * addressing fields taken from @hdr: destination ID, source ID (rendered
 * via efc_node_fcid_display()), R_CTL, OX_ID and RX_ID.
 */
#define frame_printf(efct, hdr, fmt, ...) \
	do { \
		char s_id_text[16]; \
		efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
				      s_id_text, sizeof(s_id_text)); \
		efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
			      ntoh24((hdr)->fh_d_id), s_id_text, \
			      (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
			      be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
	} while (0)
0020
0021 static struct efct_node *
0022 efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
0023 {
0024 struct efct_node *node;
0025 u64 id = (u64)port_id << 32 | node_id;
0026
0027
0028
0029
0030
0031
0032 node = xa_load(&efct->lookup, id);
0033 if (node)
0034 kref_get(&node->ref);
0035
0036 return node;
0037 }
0038
0039 static int
0040 efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
0041 {
0042 struct efct_node *node;
0043 struct fc_frame_header *hdr;
0044 u32 s_id, d_id;
0045
0046 hdr = seq->header->dma.virt;
0047
0048
0049 s_id = ntoh24(hdr->fh_s_id);
0050 d_id = ntoh24(hdr->fh_d_id);
0051
0052 if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
0053 return -EIO;
0054
0055 if (hdr->fh_type == FC_TYPE_FCP) {
0056 node = efct_node_find(efct, d_id, s_id);
0057 if (!node) {
0058 efc_log_err(efct,
0059 "Node not found, drop cmd d_id:%x s_id:%x\n",
0060 d_id, s_id);
0061 efct_hw_sequence_free(&efct->hw, seq);
0062 return 0;
0063 }
0064
0065 efct_dispatch_fcp_cmd(node, seq);
0066 } else {
0067 node = efct_node_find(efct, d_id, s_id);
0068 if (!node) {
0069 efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
0070 d_id, s_id);
0071 return -EIO;
0072 }
0073
0074 efc_log_err(efct, "Received ABTS for Node:%p\n", node);
0075 efct_node_recv_abts_frame(node, seq);
0076 }
0077
0078 kref_put(&node->ref, node->release);
0079 efct_hw_sequence_free(&efct->hw, seq);
0080 return 0;
0081 }
0082
0083 int
0084 efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
0085 {
0086 struct efct *efct = arg;
0087
0088
0089 if (!efct_dispatch_frame(efct, seq))
0090 return 0;
0091
0092
0093 efc_dispatch_frame(efct->efcport, seq);
0094 return 0;
0095 }
0096
/*
 * Completion callback for the TMF "function rejected" response frame:
 * once the response send completes (whatever its status), release the
 * SCSI IO that carried it.
 */
static int
efct_fc_tmf_rejected_cb(struct efct_io *io,
			enum efct_scsi_io_status scsi_status,
			u32 flags, void *arg)
{
	efct_scsi_io_free(io);
	return 0;
}
0105
0106 static void
0107 efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
0108 {
0109 u32 i;
0110 struct {
0111 u32 mask;
0112 enum efct_scsi_tmf_cmd cmd;
0113 } tmflist[] = {
0114 {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
0115 {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
0116 {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
0117 {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
0118 {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };
0119
0120 io->exp_xfer_len = 0;
0121
0122 for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
0123 if (tmflist[i].mask & tm_flags) {
0124 io->tmf_cmd = tmflist[i].cmd;
0125 efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
0126 break;
0127 }
0128 }
0129 if (i == ARRAY_SIZE(tmflist)) {
0130
0131 efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
0132 efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
0133 NULL, efct_fc_tmf_rejected_cb, NULL);
0134 }
0135 }
0136
0137 static int
0138 efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
0139 {
0140
0141
0142
0143
0144
0145
0146 if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
0147 struct fc_frame_header *fchdr = seq->header->dma.virt;
0148
0149 efc_log_debug(efct,
0150 "drop ox_id %04x payload (%zd) less than (%zd)\n",
0151 be16_to_cpu(fchdr->fh_ox_id),
0152 seq->payload->dma.len, sizeof(struct fcp_cmnd));
0153 return -EIO;
0154 }
0155 return 0;
0156 }
0157
0158 static void
0159 efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
0160 struct fc_frame_header *fchdr, bool sit)
0161 {
0162 io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
0163
0164 io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
0165 io->transferred = 0;
0166
0167
0168
0169
0170
0171
0172 if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
0173 io->cs_ctl = fchdr->fh_cs_ctl;
0174 else
0175 io->cs_ctl = 0;
0176
0177 io->seq_init = sit;
0178 }
0179
0180 static u32
0181 efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
0182 {
0183 u32 flags = 0;
0184
0185 switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
0186 case FCP_PTA_SIMPLE:
0187 flags |= EFCT_SCSI_CMD_SIMPLE;
0188 break;
0189 case FCP_PTA_HEADQ:
0190 flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
0191 break;
0192 case FCP_PTA_ORDERED:
0193 flags |= EFCT_SCSI_CMD_ORDERED;
0194 break;
0195 case FCP_PTA_ACA:
0196 flags |= EFCT_SCSI_CMD_ACA;
0197 break;
0198 }
0199 if (cmnd->fc_flags & FCP_CFL_WRDATA)
0200 flags |= EFCT_SCSI_CMD_DIR_IN;
0201 if (cmnd->fc_flags & FCP_CFL_RDDATA)
0202 flags |= EFCT_SCSI_CMD_DIR_OUT;
0203
0204 return flags;
0205 }
0206
/*
 * Completion callback for a send-frame WQE issued by
 * efct_sframe_common_send(): return the request tag to the HW pool and
 * free the originating HW sequence (which also backed the frame's
 * context/payload heap).
 */
static void
efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
{
	struct efct_hw_send_frame_context *ctx = arg;
	struct efct_hw *hw = ctx->hw;

	efct_hw_reqtag_free(hw, ctx->wqcb);

	efct_hw_sequence_free(hw, ctx->seq);
}
0219
/*
 * Build and send a single-frame response reusing the received
 * sequence's payload DMA buffer as scratch space ("heap"): the send
 * context is carved from the start of the buffer and the response
 * payload is copied in immediately after it. The FC header swaps the
 * request's S_ID/D_ID and echoes its OX_ID/RX_ID.
 *
 * NOTE(review): carving the context/payload out of seq->payload->dma
 * overwrites the received payload in place — callers must have
 * extracted everything they need from it first. The sequence itself is
 * freed from the send completion callback on success; on failure the
 * caller still owns it.
 *
 * Returns 0 on success or a negative error code.
 */
static int
efct_sframe_common_send(struct efct_node *node,
			struct efc_hw_sequence *seq,
			enum fc_rctl r_ctl, u32 f_ctl,
			u8 type, void *payload, u32 payload_len)
{
	struct efct *efct = node->efct;
	struct efct_hw *hw = &efct->hw;
	int rc = 0;
	struct fc_frame_header *req_hdr = seq->header->dma.virt;
	struct fc_frame_header hdr;
	struct efct_hw_send_frame_context *ctx;

	u32 heap_size = seq->payload->dma.size;
	uintptr_t heap_phys_base = seq->payload->dma.phys;
	u8 *heap_virt_base = seq->payload->dma.virt;
	u32 heap_offset = 0;

	/* Build the response header: swap S_ID/D_ID, echo OX_ID/RX_ID */
	memset(&hdr, 0, sizeof(hdr));
	hdr.fh_r_ctl = r_ctl;

	memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
	memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
	hdr.fh_type = type;
	hton24(hdr.fh_f_ctl, f_ctl);
	hdr.fh_ox_id = req_hdr->fh_ox_id;
	hdr.fh_rx_id = req_hdr->fh_rx_id;
	hdr.fh_cs_ctl = 0;
	hdr.fh_df_ctl = 0;
	hdr.fh_seq_cnt = 0;
	hdr.fh_parm_offset = 0;

	/*
	 * Allocate the next sequence ID; the post-increment counter is
	 * truncated to 8 bits, then decremented to yield the pre-increment
	 * value for this frame.
	 */
	hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
	hdr.fh_seq_id--;

	/* Carve the send context from the front of the heap */
	ctx = (void *)(heap_virt_base + heap_offset);
	heap_offset += sizeof(*ctx);
	if (heap_offset > heap_size) {
		efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
			    heap_offset, heap_size);
		return -EIO;
	}

	memset(ctx, 0, sizeof(*ctx));

	/* Save the sequence so the completion callback can free it */
	ctx->seq = seq;

	/* Carve the payload area directly after the context */
	ctx->payload.phys = heap_phys_base + heap_offset;
	ctx->payload.virt = heap_virt_base + heap_offset;
	ctx->payload.size = payload_len;
	ctx->payload.len = payload_len;
	heap_offset += payload_len;
	if (heap_offset > heap_size) {
		efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
			    heap_offset, heap_size);
		return -EIO;
	}

	/* Copy the response payload into the carved area */
	memcpy(ctx->payload.virt, payload, payload_len);

	/* Send the frame; the callback frees the reqtag and sequence */
	rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
				FC_EOF_T, &ctx->payload, ctx,
				efct_sframe_common_send_cb, ctx);
	if (rc)
		efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);

	return rc;
}
0298
0299 static int
0300 efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
0301 void *rsp, u32 rsp_len)
0302 {
0303 return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
0304 FC_FC_EX_CTX |
0305 FC_FC_LAST_SEQ |
0306 FC_FC_END_SEQ |
0307 FC_FC_SEQ_INIT,
0308 FC_TYPE_FCP,
0309 rsp, rsp_len);
0310 }
0311
/*
 * Respond to an FCP command that could not get an IO context: send a
 * frame-level FCP response with status TASK_SET_FULL (node already has
 * active IOs) or BUSY (no active IOs), reporting the whole FCP_DL as
 * residual.
 */
static int
efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
				       struct efc_hw_sequence *seq)
{
	struct fcp_resp_with_ext fcprsp;
	struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
	int rc = 0;
	unsigned long flags = 0;
	struct efct *efct = node->efct;

	/* Pick BUSY vs TASK_SET_FULL based on whether IOs are active */
	memset(&fcprsp, 0, sizeof(fcprsp));
	spin_lock_irqsave(&node->active_ios_lock, flags);
	fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
				SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
	spin_unlock_irqrestore(&node->active_ios_lock, flags);
	/*
	 * NOTE(review): fr_resid is a big-endian wire field but the value
	 * stored here is the CPU-endian FCP_DL via a type-punning cast —
	 * looks like it should be a straight be32 copy; confirm intent.
	 */
	*((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);

	/* Send the response as a raw single frame (no IO context) */
	rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
	if (rc)
		efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);

	return rc;
}
0337
0338 int
0339 efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
0340 {
0341 struct efct *efct = node->efct;
0342 struct fc_frame_header *fchdr = seq->header->dma.virt;
0343 struct fcp_cmnd *cmnd = NULL;
0344 struct efct_io *io = NULL;
0345 u32 lun;
0346
0347 if (!seq->payload) {
0348 efc_log_err(efct, "Sequence payload is NULL.\n");
0349 return -EIO;
0350 }
0351
0352 cmnd = seq->payload->dma.virt;
0353
0354
0355 if (efct_validate_fcp_cmd(efct, seq))
0356 return -EIO;
0357
0358 lun = scsilun_to_int(&cmnd->fc_lun);
0359 if (lun == U32_MAX)
0360 return -EIO;
0361
0362 io = efct_scsi_io_alloc(node);
0363 if (!io) {
0364 int rc;
0365
0366
0367 rc = efct_sframe_send_task_set_full_or_busy(node, seq);
0368 if (rc)
0369 efc_log_err(efct, "Failed to send busy task: %d\n", rc);
0370
0371 return rc;
0372 }
0373
0374 io->hw_priv = seq->hw_priv;
0375
0376 io->app_id = 0;
0377
0378
0379 efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);
0380
0381 if (cmnd->fc_tm_flags) {
0382 efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
0383 } else {
0384 u32 flags = efct_get_flags_fcp_cmd(cmnd);
0385
0386 if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
0387 efc_log_err(efct, "Additional CDB not supported\n");
0388 return -EIO;
0389 }
0390
0391
0392
0393
0394 efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
0395 sizeof(cmnd->fc_cdb), flags);
0396 }
0397
0398 return 0;
0399 }
0400
/*
 * Process a received ABTS carried by @io (a freshly allocated SCSI IO).
 *
 * If the exchange being aborted is found (efct_io_find_tgt_io() — which,
 * given the kref_put below, presumably returns a referenced IO; confirm
 * against its definition), an ABORT_TASK TMF is dispatched against it
 * using @io as the TMF carrier. Otherwise a BA_RJT is sent on @io.
 * Always returns 0.
 */
static int
efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct efct *efct = io->efct;
	u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
	u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
	struct efct_io *abortio;

	/* Locate the target IO for the exchange named by the ABTS */
	abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);

	if (abortio) {
		efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
			     ox_id, rx_id);

		/* The carrier IO represents the ABTS exchange itself */
		io->display_name = "abts";
		io->init_task_tag = ox_id;

		/* Remember the RX_ID so the BLS response can echo it */
		io->abort_rx_id = rx_id;

		/* Dispatch ABORT_TASK against the found IO */
		io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
		efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
				   EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);

		/* Drop the reference taken by the lookup above */
		kref_put(&abortio->ref, abortio->release);
	} else {
		/* No matching exchange: reject the ABTS with a BA_RJT */
		efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
			     ox_id);

		efct_bls_send_rjt(io, hdr);
	}
	return 0;
}
0461
0462 int
0463 efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
0464 {
0465 struct efct *efct = node->efct;
0466 struct fc_frame_header *hdr = seq->header->dma.virt;
0467 struct efct_io *io = NULL;
0468
0469 node->abort_cnt++;
0470 io = efct_scsi_io_alloc(node);
0471 if (io) {
0472 io->hw_priv = seq->hw_priv;
0473
0474 io->seq_init = 1;
0475
0476
0477 io->efct = efct;
0478 io->node = node;
0479 io->cmd_tgt = true;
0480
0481 efct_process_abts(io, seq->header->dma.virt);
0482 } else {
0483 efc_log_err(efct,
0484 "SCSI IO allocation failed for ABTS received ");
0485 efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
0486 ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
0487 be16_to_cpu(hdr->fh_ox_id),
0488 be16_to_cpu(hdr->fh_rx_id));
0489 }
0490
0491 return 0;
0492 }