0001
0002
0003
0004
0005
0006
0007 #include "efct_driver.h"
0008 #include "efct_hw.h"
0009 #include "efct_unsol.h"
0010
0011 struct efct_hw_link_stat_cb_arg {
0012 void (*cb)(int status, u32 num_counters,
0013 struct efct_hw_link_stat_counts *counters, void *arg);
0014 void *arg;
0015 };
0016
0017 struct efct_hw_host_stat_cb_arg {
0018 void (*cb)(int status, u32 num_counters,
0019 struct efct_hw_host_stat_counts *counters, void *arg);
0020 void *arg;
0021 };
0022
0023 struct efct_hw_fw_wr_cb_arg {
0024 void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
0025 void *arg;
0026 };
0027
0028 struct efct_mbox_rqst_ctx {
0029 int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
0030 void *arg;
0031 };
0032
0033 static int
0034 efct_hw_link_event_init(struct efct_hw *hw)
0035 {
0036 hw->link.status = SLI4_LINK_STATUS_MAX;
0037 hw->link.topology = SLI4_LINK_TOPO_NONE;
0038 hw->link.medium = SLI4_LINK_MEDIUM_MAX;
0039 hw->link.speed = 0;
0040 hw->link.loop_map = NULL;
0041 hw->link.fc_id = U32_MAX;
0042
0043 return 0;
0044 }
0045
0046 static int
0047 efct_hw_read_max_dump_size(struct efct_hw *hw)
0048 {
0049 u8 buf[SLI4_BMBX_SIZE];
0050 struct efct *efct = hw->os;
0051 int rc = 0;
0052 struct sli4_rsp_cmn_set_dump_location *rsp;
0053
0054
0055 if (PCI_FUNC(efct->pci->devfn) != 0)
0056 return rc;
0057
0058 if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
0059 return -EIO;
0060
0061 rsp = (struct sli4_rsp_cmn_set_dump_location *)
0062 (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
0063
0064 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
0065 if (rc != 0) {
0066 efc_log_debug(hw->os, "set dump location cmd failed\n");
0067 return rc;
0068 }
0069
0070 hw->dump_size =
0071 le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
0072
0073 efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
0074
0075 return rc;
0076 }
0077
0078 static int
0079 __efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
0080 {
0081 struct sli4_cmd_read_topology *read_topo =
0082 (struct sli4_cmd_read_topology *)mqe;
0083 u8 speed;
0084 struct efc_domain_record drec = {0};
0085 struct efct *efct = hw->os;
0086
0087 if (status || le16_to_cpu(read_topo->hdr.status)) {
0088 efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
0089 le16_to_cpu(read_topo->hdr.status));
0090 return -EIO;
0091 }
0092
0093 switch (le32_to_cpu(read_topo->dw2_attentype) &
0094 SLI4_READTOPO_ATTEN_TYPE) {
0095 case SLI4_READ_TOPOLOGY_LINK_UP:
0096 hw->link.status = SLI4_LINK_STATUS_UP;
0097 break;
0098 case SLI4_READ_TOPOLOGY_LINK_DOWN:
0099 hw->link.status = SLI4_LINK_STATUS_DOWN;
0100 break;
0101 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
0102 hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
0103 break;
0104 default:
0105 hw->link.status = SLI4_LINK_STATUS_MAX;
0106 break;
0107 }
0108
0109 switch (read_topo->topology) {
0110 case SLI4_READ_TOPO_NON_FC_AL:
0111 hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
0112 break;
0113 case SLI4_READ_TOPO_FC_AL:
0114 hw->link.topology = SLI4_LINK_TOPO_FC_AL;
0115 if (hw->link.status == SLI4_LINK_STATUS_UP)
0116 hw->link.loop_map = hw->loop_map.virt;
0117 hw->link.fc_id = read_topo->acquired_al_pa;
0118 break;
0119 default:
0120 hw->link.topology = SLI4_LINK_TOPO_MAX;
0121 break;
0122 }
0123
0124 hw->link.medium = SLI4_LINK_MEDIUM_FC;
0125
0126 speed = (le32_to_cpu(read_topo->currlink_state) &
0127 SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
0128 switch (speed) {
0129 case SLI4_READ_TOPOLOGY_SPEED_1G:
0130 hw->link.speed = 1 * 1000;
0131 break;
0132 case SLI4_READ_TOPOLOGY_SPEED_2G:
0133 hw->link.speed = 2 * 1000;
0134 break;
0135 case SLI4_READ_TOPOLOGY_SPEED_4G:
0136 hw->link.speed = 4 * 1000;
0137 break;
0138 case SLI4_READ_TOPOLOGY_SPEED_8G:
0139 hw->link.speed = 8 * 1000;
0140 break;
0141 case SLI4_READ_TOPOLOGY_SPEED_16G:
0142 hw->link.speed = 16 * 1000;
0143 break;
0144 case SLI4_READ_TOPOLOGY_SPEED_32G:
0145 hw->link.speed = 32 * 1000;
0146 break;
0147 case SLI4_READ_TOPOLOGY_SPEED_64G:
0148 hw->link.speed = 64 * 1000;
0149 break;
0150 case SLI4_READ_TOPOLOGY_SPEED_128G:
0151 hw->link.speed = 128 * 1000;
0152 break;
0153 }
0154
0155 drec.speed = hw->link.speed;
0156 drec.fc_id = hw->link.fc_id;
0157 drec.is_nport = true;
0158 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
0159
0160 return 0;
0161 }
0162
0163 static int
0164 efct_hw_cb_link(void *ctx, void *e)
0165 {
0166 struct efct_hw *hw = ctx;
0167 struct sli4_link_event *event = e;
0168 struct efc_domain *d = NULL;
0169 int rc = 0;
0170 struct efct *efct = hw->os;
0171
0172 efct_hw_link_event_init(hw);
0173
0174 switch (event->status) {
0175 case SLI4_LINK_STATUS_UP:
0176
0177 hw->link = *event;
0178 efct->efcport->link_status = EFC_LINK_STATUS_UP;
0179
0180 if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
0181 struct efc_domain_record drec = {0};
0182
0183 efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
0184 event->speed);
0185 drec.speed = event->speed;
0186 drec.fc_id = event->fc_id;
0187 drec.is_nport = true;
0188 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
0189 &drec);
0190 } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
0191 u8 buf[SLI4_BMBX_SIZE];
0192
0193 efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
0194 event->speed);
0195
0196 if (!sli_cmd_read_topology(&hw->sli, buf,
0197 &hw->loop_map)) {
0198 rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
0199 __efct_read_topology_cb, NULL);
0200 }
0201
0202 if (rc)
0203 efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
0204 } else {
0205 efc_log_info(hw->os, "%s(%#x), speed is %d\n",
0206 "Link Up, unsupported topology ",
0207 event->topology, event->speed);
0208 }
0209 break;
0210 case SLI4_LINK_STATUS_DOWN:
0211 efc_log_info(hw->os, "Link down\n");
0212
0213 hw->link.status = event->status;
0214 efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
0215
0216 d = efct->efcport->domain;
0217 if (d)
0218 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
0219 break;
0220 default:
0221 efc_log_debug(hw->os, "unhandled link status %#x\n",
0222 event->status);
0223 break;
0224 }
0225
0226 return 0;
0227 }
0228
0229 int
0230 efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
0231 {
0232 u32 i, max_sgl, cpus;
0233
0234 if (hw->hw_setup_called)
0235 return 0;
0236
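/*
 * First-time setup: zero the entire HW structure before initializing the
 * locks, lists and pools below. Repeat calls return early above via
 * hw_setup_called, so existing state is never wiped twice.
 */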
0242 memset(hw, 0, sizeof(struct efct_hw));
0243
0244 hw->hw_setup_called = true;
0245
0246 hw->os = os;
0247
0248 mutex_init(&hw->bmbx_lock);
0249 spin_lock_init(&hw->cmd_lock);
0250 INIT_LIST_HEAD(&hw->cmd_head);
0251 INIT_LIST_HEAD(&hw->cmd_pending);
0252 hw->cmd_head_count = 0;
0253
0254
0255 hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
0256 sizeof(struct efct_command_ctx));
0257 if (!hw->cmd_ctx_pool) {
0258 efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
0259 return -EIO;
0260 }
0261
0262
0263 hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
0264 sizeof(struct efct_mbox_rqst_ctx));
0265 if (!hw->mbox_rqst_pool) {
0266 efc_log_err(hw->os, "failed to allocate mbox request pool\n");
0267 return -EIO;
0268 }
0269
0270 spin_lock_init(&hw->io_lock);
0271 INIT_LIST_HEAD(&hw->io_inuse);
0272 INIT_LIST_HEAD(&hw->io_free);
0273 INIT_LIST_HEAD(&hw->io_wait_free);
0274
0275 atomic_set(&hw->io_alloc_failed_count, 0);
0276
0277 hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
0278 if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
0279 efc_log_err(hw->os, "SLI setup failed\n");
0280 return -EIO;
0281 }
0282
0283 efct_hw_link_event_init(hw);
0284
0285 sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
0286
0287
0288
0289
0290 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
0291 hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
0292
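/*
 * Size each WQ at half the CQ depth so that work-queue completions cannot
 * overrun their completion queue (inferred intent of the divide-by-two).
 */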
0297 hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
0298
0299
0300
0301
0302
0303 hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
0304 hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
0305
0306 cpus = num_possible_cpus();
0307 hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
0308
0309 max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
0310 max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
0311 hw->config.n_sgl = max_sgl;
0312
0313 (void)efct_hw_read_max_dump_size(hw);
0314
0315 return 0;
0316 }
0317
0318 static void
0319 efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
0320 {
0321 efc_log_info(hw->os,
0322 "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
0323 j, hw->config.filter_def[j], i, id);
0324 }
0325
0326 static inline void
0327 efct_hw_init_free_io(struct efct_hw_io *io)
0328 {
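/* Reset the per-I/O fields that must be clean before this HW I/O is reused. */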
0333 io->done = NULL;
0334 io->abort_done = NULL;
0335 io->status_saved = false;
0336 io->abort_in_progress = false;
0337 io->type = 0xFFFF;
0338 io->wq = NULL;
0339 }
0340
0341 static bool efct_hw_iotype_is_originator(u16 io_type)
0342 {
0343 switch (io_type) {
0344 case EFCT_HW_FC_CT:
0345 case EFCT_HW_ELS_REQ:
0346 return true;
0347 default:
0348 return false;
0349 }
0350 }
0351
0352 static void
0353 efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
0354 {
0355
0356 io->sgl = &io->def_sgl;
0357 io->sgl_count = io->def_sgl_count;
0358 }
0359
0360 static void
0361 efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
0362 {
0363 struct efct_hw_io *io = arg;
0364 struct efct_hw *hw = io->hw;
0365 struct sli4_fc_wcqe *wcqe = (void *)cqe;
0366 u32 len = 0;
0367 u32 ext = 0;
0368
0369
0370 if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
0371 io->xbusy = false;
0372
0373
0374 switch (io->type) {
0375 case EFCT_HW_BLS_ACC:
0376 case EFCT_HW_BLS_RJT:
0377 break;
0378 case EFCT_HW_ELS_REQ:
0379 sli_fc_els_did(&hw->sli, cqe, &ext);
0380 len = sli_fc_response_length(&hw->sli, cqe);
0381 break;
0382 case EFCT_HW_ELS_RSP:
0383 case EFCT_HW_FC_CT_RSP:
0384 break;
0385 case EFCT_HW_FC_CT:
0386 len = sli_fc_response_length(&hw->sli, cqe);
0387 break;
0388 case EFCT_HW_IO_TARGET_WRITE:
0389 len = sli_fc_io_length(&hw->sli, cqe);
0390 break;
0391 case EFCT_HW_IO_TARGET_READ:
0392 len = sli_fc_io_length(&hw->sli, cqe);
0393 break;
0394 case EFCT_HW_IO_TARGET_RSP:
0395 break;
0396 case EFCT_HW_IO_DNRX_REQUEUE:
0397
0398
0399 break;
0400 default:
0401 efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
0402 io->type, io->indicator);
0403 break;
0404 }
0405 if (status) {
0406 ext = sli_fc_ext_status(&hw->sli, cqe);
0407
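/*
 * If the port still owns the XRI (XB set) on a failed originator I/O,
 * issue an internal abort so the XRI is reclaimed; when the abort is
 * accepted, the saved status is reported once the abort completes.
 */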
0411 if (efct_hw_iotype_is_originator(io->type) &&
0412 wcqe->flags & SLI4_WCQE_XB) {
0413 int rc;
0414
0415 efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
0416 io->indicator, io->reqtag);
0417
0418
0419
0420
0421
0422
0423 rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
0424 if (rc == 0) {
0425
0426
0427
0428
0429 io->status_saved = true;
0430 io->saved_status = status;
0431 io->saved_ext = ext;
0432 io->saved_len = len;
0433 goto exit_efct_hw_wq_process_io;
0434 } else if (rc == -EINPROGRESS) {
0435
0436
0437
0438
0439
0440 efc_log_debug(hw->os, "%s%#x tag=%#x\n",
0441 "abort in progress xri=",
0442 io->indicator, io->reqtag);
0443
0444 } else {
0445
0446
0447
0448 efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
0449 "Failed to abort xri=",
0450 io->indicator, io->reqtag, rc);
0451 }
0452 }
0453 }
0454
0455 if (io->done) {
0456 efct_hw_done_t done = io->done;
0457
0458 io->done = NULL;
0459
0460 if (io->status_saved) {
0461
0462 status = io->saved_status;
0463 len = io->saved_len;
0464 ext = io->saved_ext;
0465 io->status_saved = false;
0466 }
0467
0468
0469 efct_hw_io_restore_sgl(hw, io);
0470 done(io, len, status, ext, io->arg);
0471 }
0472
0473 exit_efct_hw_wq_process_io:
0474 return;
0475 }
0476
0477 static int
0478 efct_hw_setup_io(struct efct_hw *hw)
0479 {
0480 u32 i = 0;
0481 struct efct_hw_io *io = NULL;
0482 uintptr_t xfer_virt = 0;
0483 uintptr_t xfer_phys = 0;
0484 u32 index;
0485 bool new_alloc = true;
0486 struct efc_dma *dma;
0487 struct efct *efct = hw->os;
0488
0489 if (!hw->io) {
0490 hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
0491 if (!hw->io)
0492 return -ENOMEM;
0493
0494 memset(hw->io, 0, hw->config.n_io * sizeof(io));
0495
0496 for (i = 0; i < hw->config.n_io; i++) {
0497 hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
0498 if (!hw->io[i])
0499 goto error;
0500 }
0501
0502
0503 hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
0504 GFP_KERNEL);
0505 if (!hw->wqe_buffs) {
0506 kfree(hw->io);
0507 return -ENOMEM;
0508 }
0509
0510 } else {
0511
0512 new_alloc = false;
0513 }
0514
0515 if (new_alloc) {
0516 dma = &hw->xfer_rdy;
0517 dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
0518 dma->virt = dma_alloc_coherent(&efct->pci->dev,
0519 dma->size, &dma->phys, GFP_KERNEL);
0520 if (!dma->virt)
0521 return -ENOMEM;
0522 }
0523 xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
0524 xfer_phys = hw->xfer_rdy.phys;
0525
0526
0527 for (i = 0; i < hw->config.n_io; i++) {
0528 struct hw_wq_callback *wqcb;
0529
0530 io = hw->io[i];
0531
0532
0533 io->hw = hw;
0534
0535
0536 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
0537
0538
0539 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
0540 if (!wqcb) {
0541 efc_log_err(hw->os, "can't allocate request tag\n");
0542 return -ENOSPC;
0543 }
0544 io->reqtag = wqcb->instance_index;
0545
0546
0547 efct_hw_init_free_io(io);
0548
0549
0550 io->xbusy = 0;
0551
0552 if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
0553 &io->indicator, &index)) {
0554 efc_log_err(hw->os,
0555 "sli_resource_alloc failed @ %d\n", i);
0556 return -ENOMEM;
0557 }
0558
0559 if (new_alloc) {
0560 dma = &io->def_sgl;
0561 dma->size = hw->config.n_sgl *
0562 sizeof(struct sli4_sge);
0563 dma->virt = dma_alloc_coherent(&efct->pci->dev,
0564 dma->size, &dma->phys,
0565 GFP_KERNEL);
0566 if (!dma->virt) {
0567 efc_log_err(hw->os, "dma_alloc fail %d\n", i);
0568 memset(&io->def_sgl, 0,
0569 sizeof(struct efc_dma));
0570 return -ENOMEM;
0571 }
0572 }
0573 io->def_sgl_count = hw->config.n_sgl;
0574 io->sgl = &io->def_sgl;
0575 io->sgl_count = io->def_sgl_count;
0576
0577 if (hw->xfer_rdy.size) {
0578 io->xfer_rdy.virt = (void *)xfer_virt;
0579 io->xfer_rdy.phys = xfer_phys;
0580 io->xfer_rdy.size = sizeof(struct fcp_txrdy);
0581
0582 xfer_virt += sizeof(struct fcp_txrdy);
0583 xfer_phys += sizeof(struct fcp_txrdy);
0584 }
0585 }
0586
0587 return 0;
0588 error:
0589 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
0590 kfree(hw->io[i]);
0591 hw->io[i] = NULL;
0592 }
0593
0594 kfree(hw->io);
0595 hw->io = NULL;
0596
0597 return -ENOMEM;
0598 }
0599
0600 static int
0601 efct_hw_init_prereg_io(struct efct_hw *hw)
0602 {
0603 u32 i, idx = 0;
0604 struct efct_hw_io *io = NULL;
0605 u8 cmd[SLI4_BMBX_SIZE];
0606 int rc = 0;
0607 u32 n_rem;
0608 u32 n = 0;
0609 u32 sgls_per_request = 256;
0610 struct efc_dma **sgls = NULL;
0611 struct efc_dma req;
0612 struct efct *efct = hw->os;
0613
0614 sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
0615 if (!sgls)
0616 return -ENOMEM;
0617
0618 memset(&req, 0, sizeof(struct efc_dma));
0619 req.size = 32 + sgls_per_request * 16;
0620 req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
0621 GFP_KERNEL);
0622 if (!req.virt) {
0623 kfree(sgls);
0624 return -ENOMEM;
0625 }
0626
0627 for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
0628
0629
0630
0631 u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
0632
0633 for (n = 0; n < min; n++) {
0634
0635 if (n > 0) {
0636 if (hw->io[idx + n]->indicator !=
0637 hw->io[idx + n - 1]->indicator + 1)
0638 break;
0639 }
0640
0641 sgls[n] = hw->io[idx + n]->sgl;
0642 }
0643
0644 if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
0645 hw->io[idx]->indicator, n, sgls, NULL, &req)) {
0646 rc = -EIO;
0647 break;
0648 }
0649
0650 rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
0651 if (rc) {
0652 efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
0653 break;
0654 }
0655
0656
0657 for (i = 0; i < n; i++, idx++) {
0658 io = hw->io[idx];
0659 io->state = EFCT_HW_IO_STATE_FREE;
0660 INIT_LIST_HEAD(&io->list_entry);
0661 list_add_tail(&io->list_entry, &hw->io_free);
0662 }
0663 }
0664
0665 dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
0666 memset(&req, 0, sizeof(struct efc_dma));
0667 kfree(sgls);
0668
0669 return rc;
0670 }
0671
0672 static int
0673 efct_hw_init_io(struct efct_hw *hw)
0674 {
0675 u32 i, idx = 0;
0676 bool prereg = false;
0677 struct efct_hw_io *io = NULL;
0678 int rc = 0;
0679
0680 prereg = hw->sli.params.sgl_pre_registered;
0681
0682 if (prereg)
0683 return efct_hw_init_prereg_io(hw);
0684
0685 for (i = 0; i < hw->config.n_io; i++, idx++) {
0686 io = hw->io[idx];
0687 io->state = EFCT_HW_IO_STATE_FREE;
0688 INIT_LIST_HEAD(&io->list_entry);
0689 list_add_tail(&io->list_entry, &hw->io_free);
0690 }
0691
0692 return rc;
0693 }
0694
0695 static int
0696 efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
0697 {
0698 int rc = 0;
0699 u8 buf[SLI4_BMBX_SIZE];
0700 struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
0701
memset(&param, 0, sizeof(param));
0703 param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
0704
0705 sli_cmd_common_set_features(&hw->sli, buf,
SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
0707
0708 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
0709 if (rc)
0710 efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
0711 fdt_xfer_hint, rc);
0712 else
0713 efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
0714 le32_to_cpu(param.fdt_xfer_hint));
0715
0716 return rc;
0717 }
0718
0719 static int
0720 efct_hw_config_rq(struct efct_hw *hw)
0721 {
0722 u32 min_rq_count, i, rc;
0723 struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
0724 u8 buf[SLI4_BMBX_SIZE];
0725
0726 efc_log_info(hw->os, "using REG_FCFI standard\n");
0727
0728
0729
0730
0731
0732 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
0733 rq_cfg[i].rq_id = cpu_to_le16(0xffff);
0734 rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
0735 rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
0736 rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
0737 rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
0738 }
0739
0740
0741
0742
0743
0744
0745 min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
0746 hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
0747 for (i = 0; i < min_rq_count; i++) {
0748 struct hw_rq *rq = hw->hw_rq[i];
0749 u32 j;
0750
0751 for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
0752 u32 mask = (rq->filter_mask != 0) ?
0753 rq->filter_mask : 1;
0754
0755 if (!(mask & (1U << j)))
0756 continue;
0757
0758 rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
0759 efct_logfcfi(hw, j, i, rq->hdr->id);
0760 }
0761 }
0762
0763 rc = -EIO;
0764 if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
0765 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
0766
0767 if (rc != 0) {
0768 efc_log_err(hw->os, "FCFI registration failed\n");
0769 return rc;
0770 }
0771 hw->fcf_indicator =
0772 le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
0773
0774 return rc;
0775 }
0776
0777 static int
0778 efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
0779 {
0780 u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
0781 struct hw_rq *rq;
0782 struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
0783 struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
0784 u32 rc, i;
0785
0786 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
0787 goto issue_cmd;
0788
0789
0790 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
0791 rq_filter[i].rq_id = cpu_to_le16(0xffff);
0792 rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
0793 rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
0794 rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
0795 rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
0796 }
0797
0798 rq = hw->hw_rq[0];
0799 rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
0800 rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
0801
0802 mrq_bitmask = 0x2;
0803 issue_cmd:
0804 efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
0805 hw->hw_rq_count, hw->config.rq_selection_policy, mode);
0806
0807 rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
0808 hw->config.rq_selection_policy, mrq_bitmask,
0809 hw->hw_mrq_count, rq_filter);
0810 if (rc) {
0811 efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
0812 return -EIO;
0813 }
0814
0815 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
0816
0817 rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
0818
0819 if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
0820 efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
0821 rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
0822 return -EIO;
0823 }
0824
0825 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
0826 hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
0827
0828 return 0;
0829 }
0830
0831 static void
0832 efct_hw_queue_hash_add(struct efct_queue_hash *hash,
0833 u16 id, u16 index)
0834 {
0835 u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
0836
0837
0838
0839
0840
0841 while (hash[hash_index].in_use)
0842 hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
0843
0844
0845 hash[hash_index].id = id;
0846 hash[hash_index].in_use = true;
0847 hash[hash_index].index = index;
0848 }
0849
0850 static int
0851 efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
0852 {
0853 int rc = 0;
0854 u8 buf[SLI4_BMBX_SIZE];
0855 struct sli4_rqst_cmn_set_features_health_check param;
0856 u32 health_check_flag = 0;
0857
memset(&param, 0, sizeof(param));
0859
0860 if (enable)
0861 health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
0862
0863 if (query)
0864 health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
0865
0866 param.health_check_dword = cpu_to_le32(health_check_flag);
0867
0868
0869 sli_cmd_common_set_features(&hw->sli, buf,
SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
0871
0872 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
0873 if (rc)
0874 efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
0875 else
0876 efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
0877
0878 return rc;
0879 }
0880
0881 int
0882 efct_hw_init(struct efct_hw *hw)
0883 {
0884 int rc;
0885 u32 i = 0;
0886 int rem_count;
0887 unsigned long flags = 0;
0888 struct efct_hw_io *temp;
0889 struct efc_dma *dma;
0890
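/*
 * Sanity check: no mailbox commands may be active or pending while the
 * hardware is being (re)initialized.
 */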
0898 spin_lock_irqsave(&hw->cmd_lock, flags);
0899 if (!list_empty(&hw->cmd_head)) {
0900 spin_unlock_irqrestore(&hw->cmd_lock, flags);
0901 efc_log_err(hw->os, "command found on cmd list\n");
0902 return -EIO;
0903 }
0904 if (!list_empty(&hw->cmd_pending)) {
0905 spin_unlock_irqrestore(&hw->cmd_lock, flags);
0906 efc_log_err(hw->os, "command found on pending list\n");
0907 return -EIO;
0908 }
0909 spin_unlock_irqrestore(&hw->cmd_lock, flags);
0910
0911
0912 efct_hw_rx_free(hw);
0913
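/*
 * On re-initialization, drop any HW I/Os still linked on the wait_free,
 * inuse and free lists; efct_hw_setup_io()/efct_hw_init_io() below rebuild
 * the free list.
 */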
0924 rem_count = 0;
0925 while ((!list_empty(&hw->io_wait_free))) {
0926 rem_count++;
0927 temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
0928 list_entry);
0929 list_del_init(&temp->list_entry);
0930 }
0931 if (rem_count > 0)
0932 efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
0933 rem_count);
0934
0935 rem_count = 0;
0936 while ((!list_empty(&hw->io_inuse))) {
0937 rem_count++;
0938 temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
0939 list_entry);
0940 list_del_init(&temp->list_entry);
0941 }
0942 if (rem_count > 0)
0943 efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
0944 rem_count);
0945
0946 rem_count = 0;
0947 while ((!list_empty(&hw->io_free))) {
0948 rem_count++;
0949 temp = list_first_entry(&hw->io_free, struct efct_hw_io,
0950 list_entry);
0951 list_del_init(&temp->list_entry);
0952 }
0953 if (rem_count > 0)
0954 efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
0955 rem_count);
0956
0957
0958 if (hw->config.n_rq == 1)
0959 hw->sli.features &= (~SLI4_REQFEAT_MRQP);
0960
0961 if (sli_init(&hw->sli)) {
0962 efc_log_err(hw->os, "SLI failed to initialize\n");
0963 return -EIO;
0964 }
0965
0966 if (hw->sliport_healthcheck) {
0967 rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
0968 if (rc != 0) {
0969 efc_log_err(hw->os, "Enable port Health check fail\n");
0970 return rc;
0971 }
0972 }
0973
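/*
 * For SLI-4 interface type 2 ports, program the default FDT transfer hint;
 * a failure is logged by the helper and treated as non-fatal here.
 */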
0977 if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
0983 efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
0984 }
0985
0986
0987 memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
0988 efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
0989 EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
0990
0991 memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
0992 efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
0993 EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
0994
0995 memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
0996 efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
0997 EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
0998
0999 rc = efct_hw_init_queues(hw);
1000 if (rc)
1001 return rc;
1002
1003 rc = efct_hw_map_wq_cpu(hw);
1004 if (rc)
1005 return rc;
1006
1007
1008 rc = efct_hw_rx_allocate(hw);
1009 if (rc) {
1010 efc_log_err(hw->os, "rx_allocate failed\n");
1011 return rc;
1012 }
1013
1014 rc = efct_hw_rx_post(hw);
1015 if (rc) {
1016 efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
1017 return rc;
1018 }
1019
1020 if (hw->config.n_eq == 1) {
1021 rc = efct_hw_config_rq(hw);
1022 if (rc) {
1023 efc_log_err(hw->os, "config rq failed %d\n", rc);
1024 return rc;
1025 }
1026 } else {
1027 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
1028 if (rc != 0) {
1029 efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
1030 return rc;
1031 }
1032
1033 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
1034 if (rc != 0) {
1035 efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
1036 return rc;
1037 }
1038 }
1039
1040
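/*
 * Allocate the WQ request-tag pool used to map work-queue completions back
 * to their callback and argument.
 */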
1045 hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
1046 if (!hw->wq_reqtag_pool) {
1047 efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
1048 return -ENOMEM;
1049 }
1050
1051 rc = efct_hw_setup_io(hw);
1052 if (rc) {
1053 efc_log_err(hw->os, "IO allocation failure\n");
1054 return rc;
1055 }
1056
1057 rc = efct_hw_init_io(hw);
1058 if (rc) {
1059 efc_log_err(hw->os, "IO initialization failure\n");
1060 return rc;
1061 }
1062
1063 dma = &hw->loop_map;
1064 dma->size = SLI4_MIN_LOOP_MAP_BYTES;
1065 dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
1066 GFP_KERNEL);
1067 if (!dma->virt)
1068 return -EIO;
1069
1070
1071
1072
1073
1074 for (i = 0; i < hw->eq_count; i++)
1075 sli_queue_arm(&hw->sli, &hw->eq[i], true);
1076
1077
1078
1079
1080 for (i = 0; i < hw->rq_count; i++)
1081 efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
1082
1083
1084
1085
1086 for (i = 0; i < hw->wq_count; i++)
1087 efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
1088
1089
1090
1091
1092 for (i = 0; i < hw->cq_count; i++) {
1093 efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
1094 sli_queue_arm(&hw->sli, &hw->cq[i], true);
1095 }
1096
1097
1098 for (i = 0; i < hw->hw_rq_count; i++) {
1099 struct hw_rq *rq = hw->hw_rq[i];
1100
1101 hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
1102 }
1103
1104
1105 hw->state = EFCT_HW_STATE_ACTIVE;
1106
1107
1108
1109 hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
1110 if (!hw->hw_wq[0]->send_frame_io)
1111 efc_log_err(hw->os, "alloc for send_frame_io failed\n");
1112
1113
1114 atomic_set(&hw->send_frame_seq_id, 0);
1115
1116 return 0;
1117 }
1118
1119 int
1120 efct_hw_parse_filter(struct efct_hw *hw, void *value)
1121 {
1122 int rc = 0;
1123 char *p = NULL;
1124 char *token;
1125 u32 idx = 0;
1126
1127 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
1128 hw->config.filter_def[idx] = 0;
1129
1130 p = kstrdup(value, GFP_KERNEL);
1131 if (!p || !*p) {
1132 efc_log_err(hw->os, "p is NULL\n");
1133 return -ENOMEM;
1134 }
1135
1136 idx = 0;
1137 while ((token = strsep(&p, ",")) && *token) {
1138 if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
1139 efc_log_err(hw->os, "kstrtoint failed\n");
1140
1141 if (!p || !*p)
1142 break;
1143
1144 if (idx == ARRAY_SIZE(hw->config.filter_def))
1145 break;
1146 }
1147 kfree(p);
1148
1149 return rc;
1150 }
1151
1152 u64
1153 efct_get_wwnn(struct efct_hw *hw)
1154 {
1155 struct sli4 *sli = &hw->sli;
1156 u8 p[8];
1157
1158 memcpy(p, sli->wwnn, sizeof(p));
1159 return get_unaligned_be64(p);
1160 }
1161
1162 u64
1163 efct_get_wwpn(struct efct_hw *hw)
1164 {
1165 struct sli4 *sli = &hw->sli;
1166 u8 p[8];
1167
1168 memcpy(p, sli->wwpn, sizeof(p));
1169 return get_unaligned_be64(p);
1170 }
1171
1172 static struct efc_hw_rq_buffer *
1173 efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
1174 u32 size)
1175 {
1176 struct efct *efct = hw->os;
1177 struct efc_hw_rq_buffer *rq_buf = NULL;
1178 struct efc_hw_rq_buffer *prq;
1179 u32 i;
1180
1181 if (!count)
1182 return NULL;
1183
1184 rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
1185 if (!rq_buf)
1186 return NULL;
1187 memset(rq_buf, 0, sizeof(*rq_buf) * count);
1188
for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1190 prq->rqindex = rqindex;
1191 prq->dma.size = size;
1192 prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
1193 prq->dma.size,
1194 &prq->dma.phys,
1195 GFP_KERNEL);
1196 if (!prq->dma.virt) {
1197 efc_log_err(hw->os, "DMA allocation failed\n");
1198 kfree(rq_buf);
1199 return NULL;
1200 }
1201 }
1202 return rq_buf;
1203 }
1204
1205 static void
1206 efct_hw_rx_buffer_free(struct efct_hw *hw,
1207 struct efc_hw_rq_buffer *rq_buf,
1208 u32 count)
1209 {
1210 struct efct *efct = hw->os;
1211 u32 i;
1212 struct efc_hw_rq_buffer *prq;
1213
1214 if (rq_buf) {
1215 for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1216 dma_free_coherent(&efct->pci->dev,
1217 prq->dma.size, prq->dma.virt,
1218 prq->dma.phys);
1219 memset(&prq->dma, 0, sizeof(struct efc_dma));
1220 }
1221
1222 kfree(rq_buf);
1223 }
1224 }
1225
1226 int
1227 efct_hw_rx_allocate(struct efct_hw *hw)
1228 {
1229 struct efct *efct = hw->os;
1230 u32 i;
1231 int rc = 0;
1232 u32 rqindex = 0;
1233 u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
1234 u32 payload_size = hw->config.rq_default_buffer_size;
1235
1236 rqindex = 0;
1237
1238 for (i = 0; i < hw->hw_rq_count; i++) {
1239 struct hw_rq *rq = hw->hw_rq[i];
1240
1241
1242 rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1243 rq->entry_count,
1244 hdr_size);
1245 if (!rq->hdr_buf) {
1246 efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
1247 rc = -EIO;
1248 break;
1249 }
1250
1251 efc_log_debug(hw->os,
1252 "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
1253 i, rq->hdr->id, rq->entry_count, hdr_size);
1254
1255 rqindex++;
1256
1257
1258 rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1259 rq->entry_count,
1260 payload_size);
1261 if (!rq->payload_buf) {
1262 efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
1263 rc = -EIO;
1264 break;
1265 }
1266 efc_log_debug(hw->os,
1267 "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
1268 i, rq->data->id, rq->entry_count, payload_size);
1269 rqindex++;
1270 }
1271
1272 return rc ? -EIO : 0;
1273 }
1274
1275 int
1276 efct_hw_rx_post(struct efct_hw *hw)
1277 {
1278 u32 i;
1279 u32 idx;
1280 u32 rq_idx;
1281 int rc = 0;
1282
1283 if (!hw->seq_pool) {
1284 u32 count = 0;
1285
1286 for (i = 0; i < hw->hw_rq_count; i++)
1287 count += hw->hw_rq[i]->entry_count;
1288
1289 hw->seq_pool = kmalloc_array(count,
1290 sizeof(struct efc_hw_sequence), GFP_KERNEL);
1291 if (!hw->seq_pool)
1292 return -ENOMEM;
1293 }
1294
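/*
 * Pair each RQ header buffer with its payload buffer in a sequence object
 * and hand it to efct_hw_sequence_free(), which posts the buffers back to
 * the receive queue.
 */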
1299 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
1300 struct hw_rq *rq = hw->hw_rq[rq_idx];
1301
1302 for (i = 0; i < rq->entry_count - 1; i++) {
1303 struct efc_hw_sequence *seq;
1304
1305 seq = hw->seq_pool + idx;
1306 idx++;
1307 seq->header = &rq->hdr_buf[i];
1308 seq->payload = &rq->payload_buf[i];
1309 rc = efct_hw_sequence_free(hw, seq);
1310 if (rc)
1311 break;
1312 }
1313 if (rc)
1314 break;
1315 }
1316
1317 if (rc && hw->seq_pool)
1318 kfree(hw->seq_pool);
1319
1320 return rc;
1321 }
1322
1323 void
1324 efct_hw_rx_free(struct efct_hw *hw)
1325 {
1326 u32 i;
1327
1328
1329 for (i = 0; i < hw->hw_rq_count; i++) {
1330 struct hw_rq *rq = hw->hw_rq[i];
1331
1332 if (rq) {
1333 efct_hw_rx_buffer_free(hw, rq->hdr_buf,
1334 rq->entry_count);
1335 rq->hdr_buf = NULL;
1336 efct_hw_rx_buffer_free(hw, rq->payload_buf,
1337 rq->entry_count);
1338 rq->payload_buf = NULL;
1339 }
1340 }
1341 }
1342
1343 static int
1344 efct_hw_cmd_submit_pending(struct efct_hw *hw)
1345 {
1346 int rc = 0;
1347
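/*
 * Move commands from cmd_pending to cmd_head and write them to the MQ while
 * there is room; called with cmd_lock held.
 */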
1351 while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
1352 !list_empty(&hw->cmd_pending)) {
1353 struct efct_command_ctx *ctx;
1354
1355 ctx = list_first_entry(&hw->cmd_pending,
1356 struct efct_command_ctx, list_entry);
1357 if (!ctx)
1358 break;
1359
1360 list_del_init(&ctx->list_entry);
1361
1362 list_add_tail(&ctx->list_entry, &hw->cmd_head);
1363 hw->cmd_head_count++;
1364 if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
1365 efc_log_debug(hw->os,
1366 "sli_queue_write failed: %d\n", rc);
1367 rc = -EIO;
1368 break;
1369 }
1370 }
1371 return rc;
1372 }
1373
1374 int
1375 efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
1376 {
1377 int rc = -EIO;
1378 unsigned long flags = 0;
1379 void *bmbx = NULL;
1380
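/*
 * If the port has latched a fatal error, refuse to issue the command; the
 * chip needs a reset first.
 */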
1385 if (sli_fw_error_status(&hw->sli) > 0) {
1386 efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
1387 efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
1388 sli_reg_read_status(&hw->sli),
1389 sli_reg_read_err1(&hw->sli),
1390 sli_reg_read_err2(&hw->sli));
1391
1392 return -EIO;
1393 }
1394
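/*
 * EFCT_CMD_POLL runs the command synchronously through the bootstrap
 * mailbox (serialized by bmbx_lock); EFCT_CMD_NOWAIT queues it for the MQ
 * and completes through the supplied callback.
 */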
1401 if (opts == EFCT_CMD_POLL) {
1402 mutex_lock(&hw->bmbx_lock);
1403 bmbx = hw->sli.bmbx.virt;
1404
1405 memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
1406
1407 if (sli_bmbx_command(&hw->sli) == 0) {
1408 rc = 0;
1409 memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
1410 }
1411 mutex_unlock(&hw->bmbx_lock);
1412 } else if (opts == EFCT_CMD_NOWAIT) {
1413 struct efct_command_ctx *ctx = NULL;
1414
1415 if (hw->state != EFCT_HW_STATE_ACTIVE) {
1416 efc_log_err(hw->os, "Can't send command, HW state=%d\n",
1417 hw->state);
1418 return -EIO;
1419 }
1420
1421 ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
1422 if (!ctx)
1423 return -ENOSPC;
1424
1425 memset(ctx, 0, sizeof(struct efct_command_ctx));
1426
1427 if (cb) {
1428 ctx->cb = cb;
1429 ctx->arg = arg;
1430 }
1431
1432 memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
1433 ctx->ctx = hw;
1434
1435 spin_lock_irqsave(&hw->cmd_lock, flags);
1436
1437
1438 INIT_LIST_HEAD(&ctx->list_entry);
1439 list_add_tail(&ctx->list_entry, &hw->cmd_pending);
1440
1441
1442 rc = efct_hw_cmd_submit_pending(hw);
1443
1444 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1445 }
1446
1447 return rc;
1448 }
1449
1450 static int
1451 efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
1452 size_t size)
1453 {
1454 struct efct_command_ctx *ctx = NULL;
1455 unsigned long flags = 0;
1456
1457 spin_lock_irqsave(&hw->cmd_lock, flags);
1458 if (!list_empty(&hw->cmd_head)) {
1459 ctx = list_first_entry(&hw->cmd_head,
1460 struct efct_command_ctx, list_entry);
1461 list_del_init(&ctx->list_entry);
1462 }
1463 if (!ctx) {
1464 efc_log_err(hw->os, "no command context\n");
1465 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1466 return -EIO;
1467 }
1468
1469 hw->cmd_head_count--;
1470
1471
1472 efct_hw_cmd_submit_pending(hw);
1473
1474 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1475
1476 if (ctx->cb) {
1477 memcpy(ctx->buf, mqe, size);
1478 ctx->cb(hw, status, ctx->buf, ctx->arg);
1479 }
1480
1481 mempool_free(ctx, hw->cmd_ctx_pool);
1482
1483 return 0;
1484 }
1485
1486 static int
1487 efct_hw_mq_process(struct efct_hw *hw,
1488 int status, struct sli4_queue *mq)
1489 {
1490 u8 mqe[SLI4_BMBX_SIZE];
1491 int rc;
1492
1493 rc = sli_mq_read(&hw->sli, mq, mqe);
1494 if (!rc)
1495 rc = efct_hw_command_process(hw, status, mqe, mq->size);
1496
1497 return rc;
1498 }
1499
1500 static int
1501 efct_hw_command_cancel(struct efct_hw *hw)
1502 {
1503 unsigned long flags = 0;
1504 int rc = 0;
1505
1506 spin_lock_irqsave(&hw->cmd_lock, flags);
1507
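/*
 * Fail every command still on the active list by faking a completion with a
 * zeroed MQE and an error status.
 */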
1513 while (!list_empty(&hw->cmd_head)) {
1514 u8 mqe[SLI4_BMBX_SIZE] = { 0 };
1515 struct efct_command_ctx *ctx;
1516
1517 ctx = list_first_entry(&hw->cmd_head,
1518 struct efct_command_ctx, list_entry);
1519
1520 efc_log_debug(hw->os, "hung command %08x\n",
1521 !ctx ? U32_MAX : *((u32 *)ctx->buf));
1522 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1523 rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
1524 spin_lock_irqsave(&hw->cmd_lock, flags);
1525 }
1526
1527 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1528
1529 return rc;
1530 }
1531
1532 static void
1533 efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
1534 {
1535 struct efct_mbox_rqst_ctx *ctx = arg;
1536
1537 if (ctx) {
1538 if (ctx->callback)
1539 (*ctx->callback)(hw->os->efcport, status, mqe,
1540 ctx->arg);
1541
1542 mempool_free(ctx, hw->mbox_rqst_pool);
1543 }
1544 }
1545
1546 int
1547 efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
1548 {
1549 struct efct_mbox_rqst_ctx *ctx;
1550 struct efct *efct = base;
1551 struct efct_hw *hw = &efct->hw;
1552 int rc;
1553
1554
1555
1556
1557
1558
1559 ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
1560 if (!ctx)
1561 return -EIO;
1562
1563 ctx->callback = cb;
1564 ctx->arg = arg;
1565
1566 rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
1567 if (rc) {
1568 efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
1569 mempool_free(ctx, hw->mbox_rqst_pool);
1570 return -EIO;
1571 }
1572
1573 return 0;
1574 }
1575
1576 static inline struct efct_hw_io *
1577 _efct_hw_io_alloc(struct efct_hw *hw)
1578 {
1579 struct efct_hw_io *io = NULL;
1580
1581 if (!list_empty(&hw->io_free)) {
1582 io = list_first_entry(&hw->io_free, struct efct_hw_io,
1583 list_entry);
1584 list_del(&io->list_entry);
1585 }
1586 if (io) {
1587 INIT_LIST_HEAD(&io->list_entry);
1588 list_add_tail(&io->list_entry, &hw->io_inuse);
1589 io->state = EFCT_HW_IO_STATE_INUSE;
1590 io->abort_reqtag = U32_MAX;
1591 io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
1592 if (!io->wq) {
1593 efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
1594 raw_smp_processor_id());
1595 io->wq = hw->hw_wq[0];
1596 }
1597 kref_init(&io->ref);
1598 io->release = efct_hw_io_free_internal;
1599 } else {
1600 atomic_add(1, &hw->io_alloc_failed_count);
1601 }
1602
1603 return io;
1604 }
1605
1606 struct efct_hw_io *
1607 efct_hw_io_alloc(struct efct_hw *hw)
1608 {
1609 struct efct_hw_io *io = NULL;
1610 unsigned long flags = 0;
1611
1612 spin_lock_irqsave(&hw->io_lock, flags);
1613 io = _efct_hw_io_alloc(hw);
1614 spin_unlock_irqrestore(&hw->io_lock, flags);
1615
1616 return io;
1617 }
1618
1619 static void
1620 efct_hw_io_free_move_correct_list(struct efct_hw *hw,
1621 struct efct_hw_io *io)
1622 {
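/*
 * An I/O whose XRI is still busy (XB set) must wait on io_wait_free for the
 * XRI_ABORTED CQE before it can be returned to the free list.
 */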
1627 if (io->xbusy) {
1632 INIT_LIST_HEAD(&io->list_entry);
1633 list_add_tail(&io->list_entry, &hw->io_wait_free);
1634 io->state = EFCT_HW_IO_STATE_WAIT_FREE;
1635 } else {
1636
1637 INIT_LIST_HEAD(&io->list_entry);
1638 list_add_tail(&io->list_entry, &hw->io_free);
1639 io->state = EFCT_HW_IO_STATE_FREE;
1640 }
1641 }
1642
1643 static inline void
1644 efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
1645 {
1646
1647 efct_hw_init_free_io(io);
1648
1649
1650 efct_hw_io_restore_sgl(hw, io);
1651 }
1652
1653 void
1654 efct_hw_io_free_internal(struct kref *arg)
1655 {
1656 unsigned long flags = 0;
1657 struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
1658 struct efct_hw *hw = io->hw;
1659
1660
1661 efct_hw_io_free_common(hw, io);
1662
1663 spin_lock_irqsave(&hw->io_lock, flags);
1664
1665 if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
1666 list_del_init(&io->list_entry);
1667 efct_hw_io_free_move_correct_list(hw, io);
1668 }
1669 spin_unlock_irqrestore(&hw->io_lock, flags);
1670 }
1671
1672 int
1673 efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
1674 {
1675 return kref_put(&io->ref, io->release);
1676 }
1677
1678 struct efct_hw_io *
1679 efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
1680 {
1681 u32 ioindex;
1682
1683 ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
1684 return hw->io[ioindex];
1685 }
1686
1687 int
1688 efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
1689 enum efct_hw_io_type type)
1690 {
1691 struct sli4_sge *data = NULL;
1692 u32 i = 0;
1693 u32 skips = 0;
1694 u32 sge_flags = 0;
1695
1696 if (!io) {
1697 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
1698 return -EIO;
1699 }
1700
1701
1702 io->sgl = &io->def_sgl;
1703 io->sgl_count = io->def_sgl_count;
1704 io->first_data_sge = 0;
1705
1706 memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
1707 io->n_sge = 0;
1708 io->sge_offset = 0;
1709
1710 io->type = type;
1711
1712 data = io->sgl->virt;
1713
1714
1715
1716
1717
1718 switch (type) {
1719 case EFCT_HW_IO_TARGET_WRITE:
1720
1721
1722 sge_flags = le32_to_cpu(data->dw2_flags);
1723 sge_flags &= (~SLI4_SGE_TYPE_MASK);
1724 sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
1725 data->buffer_address_high =
1726 cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
1727 data->buffer_address_low =
1728 cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
1729 data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
1730 data->dw2_flags = cpu_to_le32(sge_flags);
1731 data++;
1732
1733 skips = EFCT_TARGET_WRITE_SKIPS;
1734
1735 io->n_sge = 1;
1736 break;
1737 case EFCT_HW_IO_TARGET_READ:
1738
1739
1740
1741 skips = EFCT_TARGET_READ_SKIPS;
1742 break;
1743 case EFCT_HW_IO_TARGET_RSP:
1744
1745
1746
1747 break;
1748 default:
1749 efc_log_err(hw->os, "unsupported IO type %#x\n", type);
1750 return -EIO;
1751 }
1752
1753
1754
1755
1756 for (i = 0; i < skips; i++) {
1757 sge_flags = le32_to_cpu(data->dw2_flags);
1758 sge_flags &= (~SLI4_SGE_TYPE_MASK);
1759 sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
1760 data->dw2_flags = cpu_to_le32(sge_flags);
1761 data++;
1762 }
1763
1764 io->n_sge += skips;
1765
1766
1767
1768
1769 sge_flags = le32_to_cpu(data->dw2_flags);
1770 sge_flags |= SLI4_SGE_LAST;
1771 data->dw2_flags = cpu_to_le32(sge_flags);
1772
1773 return 0;
1774 }
1775
1776 int
1777 efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
1778 uintptr_t addr, u32 length)
1779 {
1780 struct sli4_sge *data = NULL;
1781 u32 sge_flags = 0;
1782
1783 if (!io || !addr || !length) {
1784 efc_log_err(hw->os,
1785 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
1786 hw, io, addr, length);
1787 return -EIO;
1788 }
1789
1790 if (length > hw->sli.sge_supported_length) {
1791 efc_log_err(hw->os,
1792 "length of SGE %d bigger than allowed %d\n",
1793 length, hw->sli.sge_supported_length);
1794 return -EIO;
1795 }
1796
1797 data = io->sgl->virt;
1798 data += io->n_sge;
1799
1800 sge_flags = le32_to_cpu(data->dw2_flags);
1801 sge_flags &= ~SLI4_SGE_TYPE_MASK;
1802 sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
1803 sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
1804 sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
1805
1806 data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
1807 data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
1808 data->buffer_length = cpu_to_le32(length);
1809
1810
1811
1812
1813
1814
1815 sge_flags |= SLI4_SGE_LAST;
1816 data->dw2_flags = cpu_to_le32(sge_flags);
1817
1818 if (io->n_sge) {
1819 sge_flags = le32_to_cpu(data[-1].dw2_flags);
1820 sge_flags &= ~SLI4_SGE_LAST;
1821 data[-1].dw2_flags = cpu_to_le32(sge_flags);
1822 }
1823
1824
1825 if (io->first_data_sge == 0)
1826 io->first_data_sge = io->n_sge;
1827
1828 io->sge_offset += length;
1829 io->n_sge++;
1830
1831 return 0;
1832 }
1833
1834 void
1835 efct_hw_io_abort_all(struct efct_hw *hw)
1836 {
1837 struct efct_hw_io *io_to_abort = NULL;
1838 struct efct_hw_io *next_io = NULL;
1839
1840 list_for_each_entry_safe(io_to_abort, next_io,
1841 &hw->io_inuse, list_entry) {
1842 efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
1843 }
1844 }
1845
1846 static void
1847 efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
1848 {
1849 struct efct_hw_io *io = arg;
1850 struct efct_hw *hw = io->hw;
1851 u32 ext = 0;
1852 u32 len = 0;
1853 struct hw_wq_callback *wqcb;
1854
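/*
 * A LOCAL_REJECT/NO_XRI abort completion means no XRI_ABORTED CQE is
 * expected for the original I/O, so deliver its saved completion status
 * here (inferred from the check below).
 */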
1861 ext = sli_fc_ext_status(&hw->sli, cqe);
1862 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
1863 ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
1864 efct_hw_done_t done = io->done;
1865
1866 io->done = NULL;
1867
1868
1869
1870
1871
1872
1873
1874 status = io->saved_status;
1875 len = io->saved_len;
1876 ext = io->saved_ext;
1877 io->status_saved = false;
1878 done(io, len, status, ext, io->arg);
1879 }
1880
1881 if (io->abort_done) {
1882 efct_hw_done_t done = io->abort_done;
1883
1884 io->abort_done = NULL;
1885 done(io, len, status, ext, io->abort_arg);
1886 }
1887
1888
1889 io->abort_in_progress = false;
1890
1891
1892 if (io->abort_reqtag == U32_MAX) {
1893 efc_log_err(hw->os, "HW IO already freed\n");
1894 return;
1895 }
1896
1897 wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
1898 efct_hw_reqtag_free(hw, wqcb);
1899
1900
1901
1902
1903
1904 (void)efct_hw_io_free(hw, io);
1905 }
1906
1907 static void
1908 efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
1909 {
1910 struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
1911
1912 memset(abort, 0, hw->sli.wqe_size);
1913
1914 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
1915 abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
1916
1917
1918 abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
1919
1920 abort->t_tag = cpu_to_le32(wqe->id);
1921 abort->command = SLI4_WQE_ABORT;
1922 abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
1923
1924 abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
1925
1926 abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
1927 }
1928
1929 int
1930 efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
1931 bool send_abts, void *cb, void *arg)
1932 {
1933 struct hw_wq_callback *wqcb;
1934 unsigned long flags = 0;
1935
1936 if (!io_to_abort) {
1937 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
1938 hw, io_to_abort);
1939 return -EIO;
1940 }
1941
1942 if (hw->state != EFCT_HW_STATE_ACTIVE) {
1943 efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
1944 hw->state);
1945 return -EIO;
1946 }
1947
1948
1949 if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
1950
1951 efc_log_debug(hw->os,
1952 "io not active xri=0x%x tag=0x%x\n",
1953 io_to_abort->indicator, io_to_abort->reqtag);
1954 return -ENOENT;
1955 }
1956
1957
1958 if (!io_to_abort->wq) {
1959 efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
1960 io_to_abort->indicator);
1961
1962 kref_put(&io_to_abort->ref, io_to_abort->release);
1963 return -ENOENT;
1964 }
1965
1966
1967
1968
1969
1970 if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
1971
1972 kref_put(&io_to_abort->ref, io_to_abort->release);
1973 efc_log_debug(hw->os,
1974 "io already being aborted xri=0x%x tag=0x%x\n",
1975 io_to_abort->indicator, io_to_abort->reqtag);
1976 return -EINPROGRESS;
1977 }
1978
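/*
 * Record the abort completion callback; the reference taken above is
 * dropped when the abort completes or if the abort WQE cannot be submitted.
 */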
1993 io_to_abort->abort_done = cb;
1994 io_to_abort->abort_arg = arg;
1995
1996
1997 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
1998 if (!wqcb) {
1999 efc_log_err(hw->os, "can't allocate request tag\n");
2000 return -ENOSPC;
2001 }
2002
2003 io_to_abort->abort_reqtag = wqcb->instance_index;
2004 io_to_abort->wqe.send_abts = send_abts;
2005 io_to_abort->wqe.id = io_to_abort->indicator;
2006 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
2007
2008
2009
2010
2011
2012 if (io_to_abort->wq) {
2013 spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
2014 if (io_to_abort->wqe.list_entry.next) {
2015 io_to_abort->wqe.abort_wqe_submit_needed = true;
2016 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
2017 flags);
2018 return 0;
2019 }
2020 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
2021 }
2022
2023 efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
2024
2025
2026
2027
2028
2029 if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
2030 io_to_abort->abort_in_progress = false;
2031
2032 kref_put(&io_to_abort->ref, io_to_abort->release);
2033 return -EIO;
2034 }
2035
2036 return 0;
2037 }
2038
2039 void
2040 efct_hw_reqtag_pool_free(struct efct_hw *hw)
2041 {
2042 u32 i;
2043 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2044 struct hw_wq_callback *wqcb = NULL;
2045
2046 if (reqtag_pool) {
2047 for (i = 0; i < U16_MAX; i++) {
2048 wqcb = reqtag_pool->tags[i];
2049 if (!wqcb)
2050 continue;
2051
2052 kfree(wqcb);
2053 }
2054 kfree(reqtag_pool);
2055 hw->wq_reqtag_pool = NULL;
2056 }
2057 }
2058
2059 struct reqtag_pool *
2060 efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
2061 {
2062 u32 i = 0;
2063 struct reqtag_pool *reqtag_pool;
2064 struct hw_wq_callback *wqcb;
2065
2066 reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
2067 if (!reqtag_pool)
2068 return NULL;
2069
2070 INIT_LIST_HEAD(&reqtag_pool->freelist);
2071
2072 spin_lock_init(&reqtag_pool->lock);
2073 for (i = 0; i < U16_MAX; i++) {
2074 wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
2075 if (!wqcb)
2076 break;
2077
2078 reqtag_pool->tags[i] = wqcb;
2079 wqcb->instance_index = i;
2080 wqcb->callback = NULL;
2081 wqcb->arg = NULL;
2082 INIT_LIST_HEAD(&wqcb->list_entry);
2083 list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
2084 }
2085
2086 return reqtag_pool;
2087 }
2088
2089 struct hw_wq_callback *
2090 efct_hw_reqtag_alloc(struct efct_hw *hw,
2091 void (*callback)(void *arg, u8 *cqe, int status),
2092 void *arg)
2093 {
2094 struct hw_wq_callback *wqcb = NULL;
2095 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2096 unsigned long flags = 0;
2097
2098 if (!callback)
2099 return wqcb;
2100
2101 spin_lock_irqsave(&reqtag_pool->lock, flags);
2102
2103 if (!list_empty(&reqtag_pool->freelist)) {
2104 wqcb = list_first_entry(&reqtag_pool->freelist,
2105 struct hw_wq_callback, list_entry);
2106 }
2107
2108 if (wqcb) {
2109 list_del_init(&wqcb->list_entry);
2110 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2111 wqcb->callback = callback;
2112 wqcb->arg = arg;
2113 } else {
2114 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2115 }
2116
2117 return wqcb;
2118 }
2119
2120 void
2121 efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
2122 {
2123 unsigned long flags = 0;
2124 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2125
2126 if (!wqcb->callback)
2127 efc_log_err(hw->os, "WQCB is already freed\n");
2128
2129 spin_lock_irqsave(&reqtag_pool->lock, flags);
2130 wqcb->callback = NULL;
2131 wqcb->arg = NULL;
2132 INIT_LIST_HEAD(&wqcb->list_entry);
2133 list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
2134 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2135 }
2136
2137 struct hw_wq_callback *
2138 efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
2139 {
2140 struct hw_wq_callback *wqcb;
2141
2142 wqcb = hw->wq_reqtag_pool->tags[instance_index];
2143 if (!wqcb)
2144 efc_log_err(hw->os, "wqcb for instance %d is null\n",
2145 instance_index);
2146
2147 return wqcb;
2148 }
2149
2150 int
2151 efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
2152 {
2153 int index = -1;
2154 int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
2155
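/*
 * Open-addressed lookup: probe linearly from the hashed slot until the ID
 * matches or an unused slot ends the search.
 */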
2161 do {
2162 if (hash[i].in_use && hash[i].id == id)
2163 index = hash[i].index;
2164 else
2165 i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
2166 } while (index == -1 && hash[i].in_use);
2167
2168 return index;
2169 }
2170
2171 int
2172 efct_hw_process(struct efct_hw *hw, u32 vector,
2173 u32 max_isr_time_msec)
2174 {
2175 struct hw_eq *eq;
2176
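/*
 * Interrupt/poll entry point for one EQ vector: bail out if the hardware is
 * not yet initialized, otherwise process the EQ bounded by
 * max_isr_time_msec.
 */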
2188 if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
2189 return 0;
2190
2191
2192 eq = hw->hw_eq[vector];
2193 if (!eq)
2194 return 0;
2195
2196 eq->use_count++;
2197
2198 return efct_hw_eq_process(hw, eq, max_isr_time_msec);
2199 }
2200
2201 int
2202 efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
2203 u32 max_isr_time_msec)
2204 {
2205 u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
2206 u32 tcheck_count;
2207 u64 tstart;
2208 u64 telapsed;
2209 bool done = false;
2210
2211 tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2212 tstart = jiffies_to_msecs(jiffies);
2213
2214 while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
2215 u16 cq_id = 0;
2216 int rc;
2217
2218 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2219 if (unlikely(rc)) {
2220 if (rc == SLI4_EQE_STATUS_EQ_FULL) {
2221 u32 i;
2222
2223
2224
2225
2226
2227 for (i = 0; i < hw->cq_count; i++)
2228 efct_hw_cq_process(hw, hw->hw_cq[i]);
2229 continue;
2230 } else {
2231 return rc;
2232 }
2233 } else {
2234 int index;
2235
2236 index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
2237
2238 if (likely(index >= 0))
2239 efct_hw_cq_process(hw, hw->hw_cq[index]);
2240 else
2241 efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2242 }
2243
2244 if (eq->queue->n_posted > eq->queue->posted_limit)
2245 sli_queue_arm(&hw->sli, eq->queue, false);
2246
2247 if (tcheck_count && (--tcheck_count == 0)) {
2248 tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2249 telapsed = jiffies_to_msecs(jiffies) - tstart;
2250 if (telapsed >= max_isr_time_msec)
2251 done = true;
2252 }
2253 }
2254 sli_queue_eq_arm(&hw->sli, eq->queue, true);
2255
2256 return 0;
2257 }
2258
2259 static int
2260 _efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2261 {
2262 int queue_rc;
2263
2264
2265 if (wq->wqec_count)
2266 wq->wqec_count--;
2267
2268 if (wq->wqec_count == 0) {
2269 struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
2270
2271 genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
2272 wq->wqec_count = wq->wqec_set_count;
2273 }
2274
2275
2276 wq->free_count--;
2277
2278 queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
2279
2280 return (queue_rc < 0) ? -EIO : 0;
2281 }
2282
2283 static void
2284 hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
2285 {
2286 struct efct_hw_wqe *wqe;
2287 unsigned long flags = 0;
2288
2289 spin_lock_irqsave(&wq->queue->lock, flags);
2290
2291
2292 wq->free_count += update_free_count;
2293
2294 while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
2295 wqe = list_first_entry(&wq->pending_list,
2296 struct efct_hw_wqe, list_entry);
2297 list_del_init(&wqe->list_entry);
2298 _efct_hw_wq_write(wq, wqe);
2299
2300 if (wqe->abort_wqe_submit_needed) {
2301 wqe->abort_wqe_submit_needed = false;
2302 efct_hw_fill_abort_wqe(wq->hw, wqe);
2303 INIT_LIST_HEAD(&wqe->list_entry);
2304 list_add_tail(&wqe->list_entry, &wq->pending_list);
2305 wq->wq_pending_count++;
2306 }
2307 }
2308
2309 spin_unlock_irqrestore(&wq->queue->lock, flags);
2310 }
2311
2312 void
2313 efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
2314 {
2315 u8 cqe[sizeof(struct sli4_mcqe)];
2316 u16 rid = U16_MAX;
2317
2318 enum sli4_qentry ctype;
2319 u32 n_processed = 0;
2320 u32 tstart, telapsed;
2321
2322 tstart = jiffies_to_msecs(jiffies);
2323
2324 while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
2325 int status;
2326
2327 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
2328
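/*
 * A negative parse status is either "entry not yet complete" (skip this
 * CQE) or a real error (stop processing this CQ).
 */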
2337 if (status < 0) {
2338 if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
2339
2340
2341
2342
2343 continue;
2344
2345 break;
2346 }
2347
2348 switch (ctype) {
2349 case SLI4_QENTRY_ASYNC:
2350 sli_cqe_async(&hw->sli, cqe);
2351 break;
2352 case SLI4_QENTRY_MQ:
2353
2354
2355
2356
2357 efct_hw_mq_process(hw, status, hw->mq);
2358 break;
2359 case SLI4_QENTRY_WQ:
2360 efct_hw_wq_process(hw, cq, cqe, status, rid);
2361 break;
2362 case SLI4_QENTRY_WQ_RELEASE: {
2363 u32 wq_id = rid;
2364 int index;
2365 struct hw_wq *wq = NULL;
2366
2367 index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
2368
2369 if (likely(index >= 0)) {
2370 wq = hw->hw_wq[index];
2371 } else {
2372 efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
2373 break;
2374 }
2375
2376 hw_wq_submit_pending(wq, wq->wqec_set_count);
2377
2378 break;
2379 }
2380
2381 case SLI4_QENTRY_RQ:
2382 efct_hw_rqpair_process_rq(hw, cq, cqe);
2383 break;
2384 case SLI4_QENTRY_XABT: {
2385 efct_hw_xabt_process(hw, cq, cqe, rid);
2386 break;
2387 }
2388 default:
2389 efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
2390 ctype, rid);
2391 break;
2392 }
2393
2394 n_processed++;
2395 if (n_processed == cq->queue->proc_limit)
2396 break;
2397
2398 if (cq->queue->n_posted >= cq->queue->posted_limit)
2399 sli_queue_arm(&hw->sli, cq->queue, false);
2400 }
2401
2402 sli_queue_arm(&hw->sli, cq->queue, true);
2403
2404 if (n_processed > cq->queue->max_num_processed)
2405 cq->queue->max_num_processed = n_processed;
2406 telapsed = jiffies_to_msecs(jiffies) - tstart;
2407 if (telapsed > cq->queue->max_process_time)
2408 cq->queue->max_process_time = telapsed;
2409 }
2410
2411 void
2412 efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
2413 u8 *cqe, int status, u16 rid)
2414 {
2415 struct hw_wq_callback *wqcb;
2416
2417 if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
2418 if (status)
2419 efc_log_err(hw->os, "reque xri failed, status = %d\n",
2420 status);
2421 return;
2422 }
2423
2424 wqcb = efct_hw_reqtag_get_instance(hw, rid);
2425 if (!wqcb) {
2426 efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
2427 return;
2428 }
2429
2430 if (!wqcb->callback) {
2431 efc_log_err(hw->os, "wqcb callback is NULL\n");
2432 return;
2433 }
2434
2435 (*wqcb->callback)(wqcb->arg, cqe, status);
2436 }
2437
2438 void
2439 efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
2440 u8 *cqe, u16 rid)
2441 {
2442
2443 struct efct_hw_io *io = NULL;
2444 unsigned long flags = 0;
2445
2446 io = efct_hw_io_lookup(hw, rid);
2447 if (!io) {
2448
2449 efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
2450 return;
2451 }
2452
2453 if (!io->xbusy)
2454 efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
2455 else
2456
2457 io->xbusy = false;
2458
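/* for IOs that were aborted internally, issue any pending callback now */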
2463 if (io->done) {
2464 efct_hw_done_t done = io->done;
2465 void *arg = io->arg;
2466
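/* use the status/length/ext latched when the abort was issued */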
2471 int status = io->saved_status;
2472 u32 len = io->saved_len;
2473 u32 ext = io->saved_ext;
2474
2475 io->done = NULL;
2476 io->status_saved = false;
2477
2478 done(io, len, status, ext, arg);
2479 }
2480
2481 spin_lock_irqsave(&hw->io_lock, flags);
2482 if (io->state == EFCT_HW_IO_STATE_INUSE ||
2483 io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
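/*
 * If the caller has already freed the IO (WAIT_FREE), finish the
 * free now by moving it to the free list; an IO still INUSE is left
 * alone until its owner frees it.
 */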
2489 if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
2490 io->state = EFCT_HW_IO_STATE_FREE;
2491 list_del_init(&io->list_entry);
2492 efct_hw_io_free_move_correct_list(hw, io);
2493 }
2494 }
2495 spin_unlock_irqrestore(&hw->io_lock, flags);
2496 }
2497
2498 static int
2499 efct_hw_flush(struct efct_hw *hw)
2500 {
2501 u32 i = 0;
2502
2503
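/* process any pending completions on every EQ */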
2504 for (i = 0; i < hw->eq_count; i++)
2505 efct_hw_process(hw, i, ~0);
2506
2507 return 0;
2508 }
2509
2510 int
2511 efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2512 {
2513 int rc = 0;
2514 unsigned long flags = 0;
2515
2516 spin_lock_irqsave(&wq->queue->lock, flags);
2517 if (list_empty(&wq->pending_list)) {
2518 if (wq->free_count > 0) {
2519 rc = _efct_hw_wq_write(wq, wqe);
2520 } else {
2521 INIT_LIST_HEAD(&wqe->list_entry);
2522 list_add_tail(&wqe->list_entry, &wq->pending_list);
2523 wq->wq_pending_count++;
2524 }
2525
2526 spin_unlock_irqrestore(&wq->queue->lock, flags);
2527 return rc;
2528 }
2529
2530 INIT_LIST_HEAD(&wqe->list_entry);
2531 list_add_tail(&wqe->list_entry, &wq->pending_list);
2532 wq->wq_pending_count++;
2533 while (wq->free_count > 0) {
2534 wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
2535 list_entry);
2536 if (!wqe)
2537 break;
2538
2539 list_del_init(&wqe->list_entry);
2540 rc = _efct_hw_wq_write(wq, wqe);
2541 if (rc)
2542 break;
2543
2544 if (wqe->abort_wqe_submit_needed) {
2545 wqe->abort_wqe_submit_needed = false;
2546 efct_hw_fill_abort_wqe(wq->hw, wqe);
2547
2548 INIT_LIST_HEAD(&wqe->list_entry);
2549 list_add_tail(&wqe->list_entry, &wq->pending_list);
2550 wq->wq_pending_count++;
2551 }
2552 }
2553
2554 spin_unlock_irqrestore(&wq->queue->lock, flags);
2555
2556 return rc;
2557 }
2558
2559 int
2560 efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
2561 {
2562 struct efct *efct = efc->base;
2563
2564 return efct_hw_bls_send(efct, type, bls, NULL, NULL);
2565 }
2566
2567 int
2568 efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
2569 void *cb, void *arg)
2570 {
2571 struct efct_hw *hw = &efct->hw;
2572 struct efct_hw_io *hio;
2573 struct sli_bls_payload bls;
2574 int rc;
2575
2576 if (hw->state != EFCT_HW_STATE_ACTIVE) {
2577 efc_log_err(hw->os,
2578 "cannot send BLS, HW state=%d\n", hw->state);
2579 return -EIO;
2580 }
2581
2582 hio = efct_hw_io_alloc(hw);
2583 if (!hio) {
2584 efc_log_err(hw->os, "HIO allocation failed\n");
2585 return -EIO;
2586 }
2587
2588 hio->done = cb;
2589 hio->arg = arg;
2590
2591 bls_params->xri = hio->indicator;
2592 bls_params->tag = hio->reqtag;
2593
2594 if (type == FC_RCTL_BA_ACC) {
2595 hio->type = EFCT_HW_BLS_ACC;
2596 bls.type = SLI4_SLI_BLS_ACC;
2597 memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
2598 } else {
2599 hio->type = EFCT_HW_BLS_RJT;
2600 bls.type = SLI4_SLI_BLS_RJT;
2601 memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
2602 }
2603
2604 bls.ox_id = cpu_to_le16(bls_params->ox_id);
2605 bls.rx_id = cpu_to_le16(bls_params->rx_id);
2606
2607 if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
2608 &bls, bls_params)) {
2609 efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
2610 return -EIO;
2611 }
2612
2613 hio->xbusy = true;
2614
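/*
 * Account for the WQE on this WQ and submit it; efct_hw_wq_write()
 * queues the WQE on the pending list if the WQ is currently full.
 */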
2619 hio->wq->use_count++;
2620 rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2621 if (rc >= 0) {
2622
2623 rc = 0;
2624 } else {
2625
2626 efc_log_err(hw->os,
2627 "sli_queue_write failed: %d\n", rc);
2628 hio->xbusy = false;
2629 }
2630
2631 return rc;
2632 }
2633
2634 static int
2635 efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
2636 u32 ext_status, void *arg)
2637 {
2638 struct efc_disc_io *io = arg;
2639
2640 efc_disc_io_complete(io, length, status, ext_status);
2641 return 0;
2642 }
2643
2644 static inline void
2645 efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
2646 {
2647 u8 *cmd = io->req.virt;
2648
2649 params->cmd = *cmd;
2650 params->s_id = io->s_id;
2651 params->d_id = io->d_id;
2652 params->ox_id = io->iparam.els.ox_id;
2653 params->rpi = io->rpi;
2654 params->vpi = io->vpi;
2655 params->rpi_registered = io->rpi_registered;
2656 params->xmit_len = io->xmit_len;
2657 params->rsp_len = io->rsp_len;
2658 params->timeout = io->iparam.els.timeout;
2659 }
2660
2661 static inline void
2662 efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
2663 {
2664 params->r_ctl = io->iparam.ct.r_ctl;
2665 params->type = io->iparam.ct.type;
2666 params->df_ctl = io->iparam.ct.df_ctl;
2667 params->d_id = io->d_id;
2668 params->ox_id = io->iparam.ct.ox_id;
2669 params->rpi = io->rpi;
2670 params->vpi = io->vpi;
2671 params->rpi_registered = io->rpi_registered;
2672 params->xmit_len = io->xmit_len;
2673 params->rsp_len = io->rsp_len;
2674 params->timeout = io->iparam.ct.timeout;
2675 }
2676
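/*
 * efct_els_hw_srrs_send() - send a single request/response sequence
 * (an ELS request or response, or an FC-CT request or response) on
 * behalf of the discovery layer, using a two-entry SGL: SGE[0] holds
 * the transmit buffer and, for requests, SGE[1] holds the receive
 * buffer.
 */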
2693 int
2694 efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
2695 {
2696 struct efct *efct = efc->base;
2697 struct efct_hw_io *hio;
2698 struct efct_hw *hw = &efct->hw;
2699 struct efc_dma *send = &io->req;
2700 struct efc_dma *receive = &io->rsp;
2701 struct sli4_sge *sge = NULL;
2702 int rc = 0;
2703 u32 len = io->xmit_len;
2704 u32 sge0_flags;
2705 u32 sge1_flags;
2706
2707 hio = efct_hw_io_alloc(hw);
2708 if (!hio) {
2709 pr_err("HIO alloc failed\n");
2710 return -EIO;
2711 }
2712
2713 if (hw->state != EFCT_HW_STATE_ACTIVE) {
2714 efc_log_debug(hw->os,
2715 "cannot send SRRS, HW state=%d\n", hw->state);
2716 return -EIO;
2717 }
2718
2719 hio->done = efct_els_ssrs_send_cb;
2720 hio->arg = io;
2721
2722 sge = hio->sgl->virt;
2723
2724
2725 memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
2726
2727 sge0_flags = le32_to_cpu(sge[0].dw2_flags);
2728 sge1_flags = le32_to_cpu(sge[1].dw2_flags);
2729 if (send->size) {
2730 sge[0].buffer_address_high =
2731 cpu_to_le32(upper_32_bits(send->phys));
2732 sge[0].buffer_address_low =
2733 cpu_to_le32(lower_32_bits(send->phys));
2734
2735 sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2736
2737 sge[0].buffer_length = cpu_to_le32(len);
2738 }
2739
2740 if (io->io_type == EFC_DISC_IO_ELS_REQ ||
2741 io->io_type == EFC_DISC_IO_CT_REQ) {
2742 sge[1].buffer_address_high =
2743 cpu_to_le32(upper_32_bits(receive->phys));
2744 sge[1].buffer_address_low =
2745 cpu_to_le32(lower_32_bits(receive->phys));
2746
2747 sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2748 sge1_flags |= SLI4_SGE_LAST;
2749
2750 sge[1].buffer_length = cpu_to_le32(receive->size);
2751 } else {
2752 sge0_flags |= SLI4_SGE_LAST;
2753 }
2754
2755 sge[0].dw2_flags = cpu_to_le32(sge0_flags);
2756 sge[1].dw2_flags = cpu_to_le32(sge1_flags);
2757
2758 switch (io->io_type) {
2759 case EFC_DISC_IO_ELS_REQ: {
2760 struct sli_els_params els_params;
2761
2762 hio->type = EFCT_HW_ELS_REQ;
2763 efct_fill_els_params(io, &els_params);
2764 els_params.xri = hio->indicator;
2765 els_params.tag = hio->reqtag;
2766
2767 if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2768 &els_params)) {
2769 efc_log_err(hw->os, "REQ WQE error\n");
2770 rc = -EIO;
2771 }
2772 break;
2773 }
2774 case EFC_DISC_IO_ELS_RESP: {
2775 struct sli_els_params els_params;
2776
2777 hio->type = EFCT_HW_ELS_RSP;
2778 efct_fill_els_params(io, &els_params);
2779 els_params.xri = hio->indicator;
2780 els_params.tag = hio->reqtag;
2781 if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
2782 &els_params)) {
2783 efc_log_err(hw->os, "RSP WQE error\n");
2784 rc = -EIO;
2785 }
2786 break;
2787 }
2788 case EFC_DISC_IO_CT_REQ: {
2789 struct sli_ct_params ct_params;
2790
2791 hio->type = EFCT_HW_FC_CT;
2792 efct_fill_ct_params(io, &ct_params);
2793 ct_params.xri = hio->indicator;
2794 ct_params.tag = hio->reqtag;
2795 if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2796 &ct_params)) {
2797 efc_log_err(hw->os, "GEN WQE error\n");
2798 rc = -EIO;
2799 }
2800 break;
2801 }
2802 case EFC_DISC_IO_CT_RESP: {
2803 struct sli_ct_params ct_params;
2804
2805 hio->type = EFCT_HW_FC_CT_RSP;
2806 efct_fill_ct_params(io, &ct_params);
2807 ct_params.xri = hio->indicator;
2808 ct_params.tag = hio->reqtag;
2809 if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2810 &ct_params)) {
2811 efc_log_err(hw->os, "XMIT SEQ WQE error\n");
2812 rc = -EIO;
2813 }
2814 break;
2815 }
2816 default:
2817 efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
2818 rc = -EIO;
2819 }
2820
2821 if (rc == 0) {
2822 hio->xbusy = true;
2823
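/* account for the WQE and submit it, or queue it if the WQ is full */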
2828 hio->wq->use_count++;
2829 rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2830 if (rc >= 0) {
2831
2832 rc = 0;
2833 } else {
2834
2835 efc_log_err(hw->os,
2836 "sli_queue_write failed: %d\n", rc);
2837 hio->xbusy = false;
2838 }
2839 }
2840
2841 return rc;
2842 }
2843
2844 int
2845 efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
2846 struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
2847 void *cb, void *arg)
2848 {
2849 int rc = 0;
2850 bool send_wqe = true;
2851
2852 if (!io) {
2853 pr_err("bad parm hw=%p io=%p\n", hw, io);
2854 return -EIO;
2855 }
2856
2857 if (hw->state != EFCT_HW_STATE_ACTIVE) {
2858 efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
2859 return -EIO;
2860 }
2861
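/* save state needed when the IO completes */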
2865 io->type = type;
2866 io->done = cb;
2867 io->arg = arg;
2868
2869
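/* format the work queue entry used to send the IO */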
2872 switch (type) {
2873 case EFCT_HW_IO_TARGET_WRITE: {
2874 u16 *flags = &iparam->fcp_tgt.flags;
2875 struct fcp_txrdy *xfer = io->xfer_rdy.virt;
2876
2877
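/* populate the XFER_RDY payload that solicits the write data */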
2880 xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
2881 xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
2882
2883 if (io->xbusy)
2884 *flags |= SLI4_IO_CONTINUATION;
2885 else
2886 *flags &= ~SLI4_IO_CONTINUATION;
2887 iparam->fcp_tgt.xri = io->indicator;
2888 iparam->fcp_tgt.tag = io->reqtag;
2889
2890 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
2891 &io->def_sgl, io->first_data_sge,
2892 SLI4_CQ_DEFAULT,
2893 0, 0, &iparam->fcp_tgt)) {
2894 efc_log_err(hw->os, "TRECEIVE WQE error\n");
2895 rc = -EIO;
2896 }
2897 break;
2898 }
2899 case EFCT_HW_IO_TARGET_READ: {
2900 u16 *flags = &iparam->fcp_tgt.flags;
2901
2902 if (io->xbusy)
2903 *flags |= SLI4_IO_CONTINUATION;
2904 else
2905 *flags &= ~SLI4_IO_CONTINUATION;
2906
2907 iparam->fcp_tgt.xri = io->indicator;
2908 iparam->fcp_tgt.tag = io->reqtag;
2909
2910 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
2911 &io->def_sgl, io->first_data_sge,
2912 SLI4_CQ_DEFAULT,
2913 0, 0, &iparam->fcp_tgt)) {
2914 efc_log_err(hw->os, "TSEND WQE error\n");
2915 rc = -EIO;
2916 }
2917 break;
2918 }
2919 case EFCT_HW_IO_TARGET_RSP: {
2920 u16 *flags = &iparam->fcp_tgt.flags;
2921
2922 if (io->xbusy)
2923 *flags |= SLI4_IO_CONTINUATION;
2924 else
2925 *flags &= ~SLI4_IO_CONTINUATION;
2926
2927 iparam->fcp_tgt.xri = io->indicator;
2928 iparam->fcp_tgt.tag = io->reqtag;
2929
2930 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
2931 &io->def_sgl, SLI4_CQ_DEFAULT,
2932 0, &iparam->fcp_tgt)) {
2933 efc_log_err(hw->os, "TRSP WQE error\n");
2934 rc = -EIO;
2935 }
2936
2937 break;
2938 }
2939 default:
2940 efc_log_err(hw->os, "unsupported IO type %#x\n", type);
2941 rc = -EIO;
2942 }
2943
2944 if (send_wqe && rc == 0) {
2945 io->xbusy = true;
2946
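/*
 * Count the submission against this WQ and write the WQE;
 * efct_hw_wq_write() queues it if the WQ is currently full.
 */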
2951 hw->tcmd_wq_submit[io->wq->instance]++;
2952 io->wq->use_count++;
2953 rc = efct_hw_wq_write(io->wq, &io->wqe);
2954 if (rc >= 0) {
2955
2956 rc = 0;
2957 } else {
2958
2959 efc_log_err(hw->os,
2960 "sli_queue_write failed: %d\n", rc);
2961 io->xbusy = false;
2962 }
2963 }
2964
2965 return rc;
2966 }
2967
2968 int
2969 efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
2970 u8 sof, u8 eof, struct efc_dma *payload,
2971 struct efct_hw_send_frame_context *ctx,
2972 void (*callback)(void *arg, u8 *cqe, int status),
2973 void *arg)
2974 {
2975 int rc;
2976 struct efct_hw_wqe *wqe;
2977 u32 xri;
2978 struct hw_wq *wq;
2979
2980 wqe = &ctx->wqe;
2981
2982
2983 ctx->hw = hw;
2984
2985
2986 ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
2987 if (!ctx->wqcb) {
2988 efc_log_err(hw->os, "can't allocate request tag\n");
2989 return -ENOSPC;
2990 }
2991
2992 wq = hw->hw_wq[0];
2993
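/* use the XRI of this WQ's dedicated send_frame IO */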
2997 xri = wq->send_frame_io->indicator;
2998
2999
3000 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
3001 sof, eof, (u32 *)hdr, payload, payload->len,
3002 EFCT_HW_SEND_FRAME_TIMEOUT, xri,
3003 ctx->wqcb->instance_index);
3004 if (rc) {
3005 efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
3006 return -EIO;
3007 }
3008
3009
3010 rc = efct_hw_wq_write(wq, wqe);
3011 if (rc) {
3012 efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
3013 return -EIO;
3014 }
3015
3016 wq->use_count++;
3017
3018 return 0;
3019 }
3020
3021 static int
3022 efct_hw_cb_link_stat(struct efct_hw *hw, int status,
3023 u8 *mqe, void *arg)
3024 {
3025 struct sli4_cmd_read_link_stats *mbox_rsp;
3026 struct efct_hw_link_stat_cb_arg *cb_arg = arg;
3027 struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
3028 u32 num_counters, i;
3029 u32 mbox_rsp_flags = 0;
3030
3031 mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
3032 mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
3033 num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
3034 memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
3035 EFCT_HW_LINK_STAT_MAX);
3036
3037
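/* overflow indicators for the counters start at bit 2 of dw1_flags */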
3038 for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
3039 counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
3040
3041 counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
3042 le32_to_cpu(mbox_rsp->linkfail_errcnt);
3043 counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
3044 le32_to_cpu(mbox_rsp->losssync_errcnt);
3045 counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
3046 le32_to_cpu(mbox_rsp->losssignal_errcnt);
3047 counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
3048 le32_to_cpu(mbox_rsp->primseq_errcnt);
3049 counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
3050 le32_to_cpu(mbox_rsp->inval_txword_errcnt);
3051 counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
3052 le32_to_cpu(mbox_rsp->crc_errcnt);
3053 counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
3054 le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
3055 counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
3056 le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
3057 counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
3058 le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
3059 counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
3060 le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
3061 counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
3062 le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
3063 counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
3064 le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
3065 counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
3066 le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
3067 counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
3068 le32_to_cpu(mbox_rsp->rx_eofa_cnt);
3069 counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
3070 le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
3071 counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
3072 le32_to_cpu(mbox_rsp->rx_eofni_cnt);
3073 counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
3074 le32_to_cpu(mbox_rsp->rx_soff_cnt);
3075 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
3076 le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
3077 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
3078 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
3079 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
3080 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
3081
3082 if (cb_arg) {
3083 if (cb_arg->cb) {
3084 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
3085 status = le16_to_cpu(mbox_rsp->hdr.status);
3086 cb_arg->cb(status, num_counters, counts, cb_arg->arg);
3087 }
3088
3089 kfree(cb_arg);
3090 }
3091
3092 return 0;
3093 }
3094
3095 int
3096 efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
3097 u8 clear_overflow_flags, u8 clear_all_counters,
3098 void (*cb)(int status, u32 num_counters,
3099 struct efct_hw_link_stat_counts *counters,
3100 void *arg),
3101 void *arg)
3102 {
3103 int rc = -EIO;
3104 struct efct_hw_link_stat_cb_arg *cb_arg;
3105 u8 mbxdata[SLI4_BMBX_SIZE];
3106
3107 cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
3108 if (!cb_arg)
3109 return -ENOMEM;
3110
3111 cb_arg->cb = cb;
3112 cb_arg->arg = arg;
3113
3114
3115 if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
3116 clear_overflow_flags, clear_all_counters))
3117 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3118 efct_hw_cb_link_stat, cb_arg);
3119
3120 if (rc)
3121 kfree(cb_arg);
3122
3123 return rc;
3124 }
3125
3126 static int
3127 efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3128 {
3129 struct sli4_cmd_read_status *mbox_rsp =
3130 (struct sli4_cmd_read_status *)mqe;
3131 struct efct_hw_host_stat_cb_arg *cb_arg = arg;
3132 struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
3133 u32 num_counters = EFCT_HW_HOST_STAT_MAX;
3134
3135 memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
3136 EFCT_HW_HOST_STAT_MAX);
3137
3138 counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
3139 le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
3140 counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
3141 le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
3142 counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
3143 le32_to_cpu(mbox_rsp->trans_frame_cnt);
3144 counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
3145 le32_to_cpu(mbox_rsp->recv_frame_cnt);
3146 counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
3147 le32_to_cpu(mbox_rsp->trans_seq_cnt);
3148 counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
3149 le32_to_cpu(mbox_rsp->recv_seq_cnt);
3150 counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
3151 le32_to_cpu(mbox_rsp->tot_exchanges_orig);
3152 counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
3153 le32_to_cpu(mbox_rsp->tot_exchanges_resp);
3154 counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
3155 le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
3156 counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
3157 le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
3158 counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
3159 le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
3160 counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
3161 le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
3162 counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
3163 le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
3164 counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
3165 le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
3166
3167 if (cb_arg) {
3168 if (cb_arg->cb) {
3169 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
3170 status = le16_to_cpu(mbox_rsp->hdr.status);
3171 cb_arg->cb(status, num_counters, counts, cb_arg->arg);
3172 }
3173
3174 kfree(cb_arg);
3175 }
3176
3177 return 0;
3178 }
3179
3180 int
3181 efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
3182 void (*cb)(int status, u32 num_counters,
3183 struct efct_hw_host_stat_counts *counters,
3184 void *arg),
3185 void *arg)
3186 {
3187 int rc = -EIO;
3188 struct efct_hw_host_stat_cb_arg *cb_arg;
3189 u8 mbxdata[SLI4_BMBX_SIZE];
3190
3191 cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
3192 if (!cb_arg)
3193 return -ENOMEM;
3194
3195 cb_arg->cb = cb;
3196 cb_arg->arg = arg;
3197
3198
3199 if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
3200 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3201 efct_hw_cb_host_stat, cb_arg);
3202
3203 if (rc) {
3204 efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
3205 kfree(cb_arg);
3206 }
3207
3208 return rc;
3209 }
3210
3211 struct efct_hw_async_call_ctx {
3212 efct_hw_async_cb_t callback;
3213 void *arg;
3214 u8 cmd[SLI4_BMBX_SIZE];
3215 };
3216
3217 static void
3218 efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3219 {
3220 struct efct_hw_async_call_ctx *ctx = arg;
3221
3222 if (ctx) {
3223 if (ctx->callback)
3224 (*ctx->callback)(hw, status, mqe, ctx->arg);
3225
3226 kfree(ctx);
3227 }
3228 }
3229
3230 int
3231 efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
3232 {
3233 struct efct_hw_async_call_ctx *ctx;
3234 int rc;
3235
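/*
 * Allocate a context that carries the callback and embeds the mailbox
 * command buffer; it must persist until the NOP command completes
 * asynchronously.
 */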
3241 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3242 if (!ctx)
3243 return -ENOMEM;
3244
3245 ctx->callback = callback;
3246 ctx->arg = arg;
3247
3248
3249 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
3250 efc_log_err(hw->os, "COMMON_NOP format failure\n");
3251 kfree(ctx);
3252 return -EIO;
3253 }
3254
3255 rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
3256 ctx);
3257 if (rc) {
3258 efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
3259 kfree(ctx);
3260 return -EIO;
3261 }
3262 return 0;
3263 }
3264
3265 static int
3266 efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3267 {
3268 struct sli4_cmd_sli_config *mbox_rsp =
3269 (struct sli4_cmd_sli_config *)mqe;
3270 struct sli4_rsp_cmn_write_object *wr_obj_rsp;
3271 struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
3272 u32 bytes_written;
3273 u16 mbox_status;
3274 u32 change_status;
3275
3276 wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
3277 &mbox_rsp->payload.embed;
3278 bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
3279 mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
3280 change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
3281 RSP_CHANGE_STATUS);
3282
3283 if (cb_arg) {
3284 if (cb_arg->cb) {
3285 if (!status && mbox_status)
3286 status = mbox_status;
3287 cb_arg->cb(status, bytes_written, change_status,
3288 cb_arg->arg);
3289 }
3290
3291 kfree(cb_arg);
3292 }
3293
3294 return 0;
3295 }
3296
3297 int
3298 efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
3299 u32 offset, int last,
3300 void (*cb)(int status, u32 bytes_written,
3301 u32 change_status, void *arg),
3302 void *arg)
3303 {
3304 int rc = -EIO;
3305 u8 mbxdata[SLI4_BMBX_SIZE];
3306 struct efct_hw_fw_wr_cb_arg *cb_arg;
3307 int noc = 0;
3308
3309 cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
3310 if (!cb_arg)
3311 return -ENOMEM;
3312
3313 cb_arg->cb = cb;
3314 cb_arg->arg = arg;
3315
3316
3317 if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
3318 noc, last, size, offset, "/prg/",
3319 dma))
3320 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3321 efct_hw_cb_fw_write, cb_arg);
3322
3323 if (rc != 0) {
3324 efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
3325 kfree(cb_arg);
3326 }
3327
3328 return rc;
3329 }
3330
3331 static int
3332 efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
3333 void *arg)
3334 {
3335 return 0;
3336 }
3337
3338 int
3339 efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
3340 uintptr_t value,
3341 void (*cb)(int status, uintptr_t value, void *arg),
3342 void *arg)
3343 {
3344 int rc = -EIO;
3345 u8 link[SLI4_BMBX_SIZE];
3346 u32 speed = 0;
3347 u8 reset_alpa = 0;
3348
3349 switch (ctrl) {
3350 case EFCT_HW_PORT_INIT:
3351 if (!sli_cmd_config_link(&hw->sli, link))
3352 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3353 efct_hw_cb_port_control, NULL);
3354
3355 if (rc != 0) {
3356 efc_log_err(hw->os, "CONFIG_LINK failed\n");
3357 break;
3358 }
3359 speed = hw->config.speed;
3360 reset_alpa = (u8)(value & 0xff);
3361
3362 rc = -EIO;
3363 if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
3364 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3365 efct_hw_cb_port_control, NULL);
3366
3367 if (rc)
3368 efc_log_err(hw->os, "INIT_LINK failed\n");
3369 break;
3370
3371 case EFCT_HW_PORT_SHUTDOWN:
3372 if (!sli_cmd_down_link(&hw->sli, link))
3373 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3374 efct_hw_cb_port_control, NULL);
3375
3376 if (rc)
3377 efc_log_err(hw->os, "DOWN_LINK failed\n");
3378 break;
3379
3380 default:
3381 efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
3382 break;
3383 }
3384
3385 return rc;
3386 }
3387
3388 void
3389 efct_hw_teardown(struct efct_hw *hw)
3390 {
3391 u32 i = 0;
3392 u32 destroy_queues;
3393 u32 free_memory;
3394 struct efc_dma *dma;
3395 struct efct *efct = hw->os;
3396
3397 destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
3398 free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
3399
3400
3401 if (hw->sliport_healthcheck) {
3402 hw->sliport_healthcheck = 0;
3403 efct_hw_config_sli_port_health_check(hw, 0, 0);
3404 }
3405
3406 if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
3407 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
3408
3409 efct_hw_flush(hw);
3410
3411 if (list_empty(&hw->cmd_head))
3412 efc_log_debug(hw->os,
3413 "All commands completed on MQ queue\n");
3414 else
3415 efc_log_debug(hw->os,
3416 "Some cmds still pending on MQ queue\n");
3417
3418
3419 efct_hw_command_cancel(hw);
3420 } else {
3421 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
3422 }
3423
3424 dma_free_coherent(&efct->pci->dev,
3425 hw->rnode_mem.size, hw->rnode_mem.virt,
3426 hw->rnode_mem.phys);
3427 memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
3428
3429 if (hw->io) {
3430 for (i = 0; i < hw->config.n_io; i++) {
3431 if (hw->io[i] && hw->io[i]->sgl &&
3432 hw->io[i]->sgl->virt) {
3433 dma_free_coherent(&efct->pci->dev,
3434 hw->io[i]->sgl->size,
3435 hw->io[i]->sgl->virt,
3436 hw->io[i]->sgl->phys);
3437 }
3438 kfree(hw->io[i]);
3439 hw->io[i] = NULL;
3440 }
3441 kfree(hw->io);
3442 hw->io = NULL;
3443 kfree(hw->wqe_buffs);
3444 hw->wqe_buffs = NULL;
3445 }
3446
3447 dma = &hw->xfer_rdy;
3448 dma_free_coherent(&efct->pci->dev,
3449 dma->size, dma->virt, dma->phys);
3450 memset(dma, 0, sizeof(struct efc_dma));
3451
3452 dma = &hw->loop_map;
3453 dma_free_coherent(&efct->pci->dev,
3454 dma->size, dma->virt, dma->phys);
3455 memset(dma, 0, sizeof(struct efc_dma));
3456
3457 for (i = 0; i < hw->wq_count; i++)
3458 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
3459 free_memory);
3460
3461 for (i = 0; i < hw->rq_count; i++)
3462 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
3463 free_memory);
3464
3465 for (i = 0; i < hw->mq_count; i++)
3466 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
3467 free_memory);
3468
3469 for (i = 0; i < hw->cq_count; i++)
3470 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
3471 free_memory);
3472
3473 for (i = 0; i < hw->eq_count; i++)
3474 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
3475 free_memory);
3476
3477
3478 efct_hw_rx_free(hw);
3479
3480 efct_hw_queue_teardown(hw);
3481
3482 kfree(hw->wq_cpu_array);
3483
3484 sli_teardown(&hw->sli);
3485
3486
3487 hw->state = EFCT_HW_STATE_UNINITIALIZED;
3488
3489
3490 kfree(hw->seq_pool);
3491 hw->seq_pool = NULL;
3492
3493
3494 efct_hw_reqtag_pool_free(hw);
3495
3496 mempool_destroy(hw->cmd_ctx_pool);
3497 mempool_destroy(hw->mbox_rqst_pool);
3498
3499
3500 hw->hw_setup_called = false;
3501 }
3502
3503 static int
3504 efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
3505 enum efct_hw_state prev_state)
3506 {
3507 int rc = 0;
3508
3509 switch (reset) {
3510 case EFCT_HW_RESET_FUNCTION:
3511 efc_log_debug(hw->os, "issuing function level reset\n");
3512 if (sli_reset(&hw->sli)) {
3513 efc_log_err(hw->os, "sli_reset failed\n");
3514 rc = -EIO;
3515 }
3516 break;
3517 case EFCT_HW_RESET_FIRMWARE:
3518 efc_log_debug(hw->os, "issuing firmware reset\n");
3519 if (sli_fw_reset(&hw->sli)) {
3520 efc_log_err(hw->os, "sli_soft_reset failed\n");
3521 rc = -EIO;
3522 }
3523
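/*
 * The firmware reset leaves the function non-operational, so follow
 * it with a function level reset.
 */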
3527 efc_log_debug(hw->os, "issuing function level reset\n");
3528 if (sli_reset(&hw->sli)) {
3529 efc_log_err(hw->os, "sli_reset failed\n");
3530 rc = -EIO;
3531 }
3532 break;
3533 default:
3534 efc_log_err(hw->os, "unknown type - no reset performed\n");
3535 hw->state = prev_state;
3536 rc = -EINVAL;
3537 break;
3538 }
3539
3540 return rc;
3541 }
3542
3543 int
3544 efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
3545 {
3546 int rc = 0;
3547 enum efct_hw_state prev_state = hw->state;
3548
3549 if (hw->state != EFCT_HW_STATE_ACTIVE)
3550 efc_log_debug(hw->os,
3551 "HW state %d is not active\n", hw->state);
3552
3553 hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
3554
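/*
 * If a reset or teardown is already in progress, skip the flush and
 * command drain and go straight to the SLI reset.
 */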
3559 if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
3560 prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
3561 return efct_hw_sli_reset(hw, reset, prev_state);
3562
3563 if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
3564 efct_hw_flush(hw);
3565
3566 if (list_empty(&hw->cmd_head))
3567 efc_log_debug(hw->os,
3568 "All commands completed on MQ queue\n");
3569 else
3570 efc_log_err(hw->os,
3571 "Some commands still pending on MQ queue\n");
3572 }
3573
3574
3575 rc = efct_hw_sli_reset(hw, reset, prev_state);
3576 if (rc == -EINVAL)
3577 return -EIO;
3578
3579 return rc;
3580 }