/* Broadcom NetXtreme-E RoCE driver: RDMA Controller (RCFW) HW interface */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(struct tasklet_struct *t);

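/* Hardware communication channel: a command posted to the CMDQ is tracked
 * by a bit in cmdq_bitmap. The caller either sleeps on the waitq
 * (__wait_for_resp) or busy-polls the CREQ (__block_for_resp) until the
 * completion handler clears that bit.
 */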
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	u16 cbit;
	int rc;

	cmdq = &rcfw->cmdq;
	cbit = cookie % rcfw->cmdq_depth;
	rc = wait_event_timeout(cmdq->waitq,
				!test_bit(cbit, cmdq->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	u16 cbit;

	cmdq = &rcfw->cmdq;
	cbit = cookie % rcfw->cmdq_depth;
	if (!test_bit(cbit, cmdq->cmdq_bitmap))
		goto done;
	do {
		udelay(1);
		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
	} while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}

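/* Post a single command to the CMDQ: reserve a cookie, stash the caller's
 * response buffer in the crsqe table, copy the request into one or more
 * 16-byte CMDQ elements and ring the CMDQ doorbell.
 */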
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	pdev = rcfw->pdev;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&pdev->dev,
			"RCFW not initialized, reject opcode 0x%x\n", opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&pdev->dev, "RCFW already initialized!\n");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags))
		return -ETIMEDOUT;

	/* CMDQ elements are 16 bytes each; a single request can consume
	 * one or more cmdqe.
	 */
	spin_lock_irqsave(&hwq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {
		dev_err(&pdev->dev, "RCFW: CMDQ is full!\n");
		spin_unlock_irqrestore(&hwq->lock, flags);
		return -EAGAIN;
	}

	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % rcfw->cmdq_depth;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, cmdq->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&hwq->lock, flags);
		return -EBUSY;
	}

	size = req->cmd_size;
	/* Convert cmd_size to the number of 16-byte cmdq units;
	 * req->cmd_size is modified here.
	 */
	bnxt_qplib_set_cmd_slots(req);

	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	preq = (u8 *)req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
		if (!cmdqe) {
			dev_err(&pdev->dev,
				"RCFW request failed with no cmdqe!\n");
			goto done;
		}
		/* Copy a segment of the request into this cmdq element */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		hwq->prod++;
	} while (size > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write is required to set this
		 * flag, which prompts the FW to reset its internal
		 * pointers.
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
done:
	spin_unlock_irqrestore(&hwq->lock, flags);

	return 0;
}

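/* Send an RCFW command and wait for its completion. Posting is retried on
 * -EAGAIN/-EBUSY; the wait is either a busy-poll (is_block) or a sleeping
 * wait on the CMDQ waitqueue.
 */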
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	/* Prevent posting if f/w is not in a state to process */
	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
		return 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
				cookie, opcode);
			return rc;
		}
		if (is_block)
			mdelay(1);
		else
			usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	int rc;

	/* Every recognized function-level event is simply forwarded to the
	 * aeq_handler; unknown events are rejected.
	 */
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}

	rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
	return rc;
}

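/* Handle a QP event from the CREQ: either an asynchronous QP error
 * notification or the completion of a previously posted command.
 */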
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	struct pci_dev *pdev;
	unsigned long flags;
	__le16 mcookie;
	u16 cookie;
	int rc = 0;
	u32 qp_id, tbl_indx;

	pdev = rcfw->pdev;
	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
		dev_dbg(&pdev->dev, "Received QP error notification\n");
		dev_dbg(&pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/* Command response.
		 * The CMDQ hwq lock must be held to synchronize command
		 * posting (__send_message) with completion reaping. This
		 * path runs with the CREQ hwq lock already held, so use
		 * the nested variant of spin_lock.
		 */
		spin_lock_irqsave_nested(&hwq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % rcfw->cmdq_depth;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			if (crsqe->resp && crsqe->resp->cookie)
				dev_err(&pdev->dev,
					"CMD %s cookie sent=%#x, recd=%#x\n",
					crsqe->resp ? "mismatch" : "collision",
					crsqe->resp ? crsqe->resp->cookie : 0,
					mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap))
			dev_warn(&pdev->dev,
				 "CMD bit %d was not requested\n", cbit);
		hwq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->cmdq.waitq);
		spin_unlock_irqrestore(&hwq->lock, flags);
	}
	return rc;
}

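/* CREQ completion handler: runs in tasklet context, reaps CREQ entries up
 * to the polling budget and re-arms the CREQ doorbell when done.
 */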
static void bnxt_qplib_service_creq(struct tasklet_struct *t)
{
	struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
	struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
	struct bnxt_qplib_hwq *hwq = &creq->hwq;
	struct creq_base *creqe;
	u32 sw_cons, raw_cons;
	unsigned long flags;

	/* Service the CREQ until the budget is exhausted */
	spin_lock_irqsave(&hwq->lock, flags);
	raw_cons = hwq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			creq->stats.creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				creq->stats.creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
				      rcfw->res->cctx, true);
	}
	spin_unlock_irqrestore(&hwq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_hwq *hwq;
	u32 sw_cons;

	creq = &rcfw->creq;
	hwq = &creq->hwq;
	/* Prefetch the CREQ element to be processed next */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	tasklet_schedule(&creq->creq_tasklet);

	return IRQ_HANDLED;
}

int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct creq_initialize_fw_resp resp;
	struct cmdq_initialize_fw req;
	u16 cmd_flags = 0;
	u8 pgsz, lvl;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift) to the
	 * firmware to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/* Gen P5 devices don't require this allocation, as the L2 driver
	 * does the same for RoCE as well. Also, VFs need not set up the
	 * HW context area; the PF sets up this area for the VF. Skip the
	 * HW context area creation in both cases.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;
	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
		goto config_vf_res;

	lvl = ctx->qpc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
	req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				  lvl;
	lvl = ctx->mrw_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
	req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				  lvl;
	lvl = ctx->srqc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
	req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				  lvl;
	lvl = ctx->cq_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
	req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				lvl;
	lvl = ctx->tim_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
	req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				  lvl;
	lvl = ctx->tqm_ctx.pde.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
	req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				  lvl;
	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

config_vf_res:
	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	bitmap_free(rcfw->cmdq.cmdq_bitmap);
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
	rcfw->pdev = NULL;
}

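/* Allocate the CREQ and CMDQ rings and the bookkeeping tables
 * (crsqe table, cmdq bitmap and QP table) for the RCFW channel.
 */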
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;

	rcfw->pdev = res->pdev;
	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	rcfw->res = res;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = rcfw->res;
	hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
	hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
	hwq_attr.type = bnxt_qplib_get_hwq_type(res);

	if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}
	if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK)
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256;
	else
		rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192;

	sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
	hwq_attr.depth = rcfw->cmdq_depth;
	hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
	hwq_attr.type = HWQ_TYPE_CTX;
	if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
	if (!cmdq->cmdq_bitmap)
		goto fail;

	/* Allocate one extra entry to hold QP1 */
	rcfw->qp_tbl_size = qp_tbl_sz + 1;
	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	struct bnxt_qplib_creq_ctx *creq;

	creq = &rcfw->creq;
	tasklet_disable(&creq->creq_tasklet);
	/* Mask h/w interrupts */
	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
	/* Sync with the last running IRQ handler */
	synchronize_irq(creq->msix_vec);
	if (kill)
		tasklet_kill(&creq->creq_tasklet);

	if (creq->requested) {
		free_irq(creq->msix_vec, rcfw);
		creq->requested = false;
	}
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	unsigned long indx;

	creq = &rcfw->creq;
	cmdq = &rcfw->cmdq;
	/* Make sure the HW channel is stopped! */
	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
	iounmap(creq->creq_db.reg.bar_reg);

	indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
	if (indx != rcfw->cmdq_depth)
		dev_err(&rcfw->pdev->dev,
			"disabling RCFW with pending cmd-bit %lx\n", indx);

	cmdq->cmdq_mbox.reg.bar_reg = NULL;
	creq->creq_db.reg.bar_reg = NULL;
	creq->aeq_handler = NULL;
	creq->msix_vec = 0;
}

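/* (Re)arm the CREQ interrupt: set up or re-enable the tasklet, request the
 * MSI-X vector and ring the CREQ doorbell to enable notifications.
 */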
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	struct bnxt_qplib_creq_ctx *creq;
	int rc;

	creq = &rcfw->creq;

	if (creq->requested)
		return -EFAULT;

	creq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
	else
		tasklet_enable(&creq->creq_tasklet);
	rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	creq->requested = true;

	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true);

	return 0;
}

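/* Map the CMDQ mailbox registers (producer index and trigger) from the
 * communication channel BAR.
 */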
static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw, bool is_vf)
{
	struct bnxt_qplib_cmdq_mbox *mbox;
	resource_size_t bar_reg;
	struct pci_dev *pdev;
	u16 prod_offt;
	int rc = 0;

	pdev = rcfw->pdev;
	mbox = &rcfw->cmdq.cmdq_mbox;

	mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
	mbox->reg.len = RCFW_COMM_SIZE;
	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
	if (!mbox->reg.bar_base) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d resc start is 0!\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
	if (!mbox->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	prod_offt = is_vf ? RCFW_VF_COMM_PROD_OFFSET :
			    RCFW_PF_COMM_PROD_OFFSET;
	mbox->prod = (void __iomem *)(mbox->reg.bar_reg + prod_offt);
	mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
	return rc;
}

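/* Map the CREQ doorbell register from the consumer BAR at the
 * caller-supplied offset.
 */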
static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
{
	struct bnxt_qplib_creq_db *creq_db;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	creq_db = &rcfw->creq.creq_db;

	creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
	if (!creq_db->reg.bar_base)
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			creq_db->reg.bar_id);

	bar_reg = creq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support the 57500 series */
	creq_db->reg.len = 8;
	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
	if (!creq_db->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			creq_db->reg.bar_id);
		return -ENOMEM;
	}
	creq_db->dbinfo.db = creq_db->reg.bar_reg;
	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
	creq_db->dbinfo.xid = rcfw->creq.ring_id;
	return 0;
}

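/* Write the CMDQ base address, size/level and the CREQ ring id to the
 * mailbox init registers to start the firmware command channel.
 */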
static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_cmdq_mbox *mbox;
	struct cmdq_init init = {0};

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	mbox = &cmdq->cmdq_mbox;

	init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl =
		cpu_to_le16(((rcfw->cmdq_depth <<
			      CMDQ_INIT_CMDQ_SIZE_SFT) &
			     CMDQ_INIT_CMDQ_SIZE_MASK) |
			    ((cmdq->hwq.level <<
			      CMDQ_INIT_CMDQ_LVL_SFT) &
			     CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(creq->ring_id);
	/* Write to the mailbox register */
	__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
}

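/* Bring up the RCFW channel: map the CMDQ mailbox and CREQ doorbell,
 * request the CREQ IRQ and kick off the firmware command queue.
 */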
int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   aeq_handler_t aeq_handler)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;
	int rc;

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;

	/* Clear to defaults */
	cmdq->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	init_waitqueue_head(&cmdq->waitq);

	creq->stats.creq_qp_event_processed = 0;
	creq->stats.creq_func_event_processed = 0;
	creq->aeq_handler = aeq_handler;

	rc = bnxt_qplib_map_cmdq_mbox(rcfw, virt_fn);
	if (rc)
		return rc;

	rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
	if (rc)
		return rc;

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	bnxt_qplib_start_rcfw(rcfw);

	return 0;
}

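/* Allocate a DMA-coherent side buffer used for commands that return an
 * extended response outside the CREQ element.
 */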
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
				      &sbuf->dma_addr, GFP_KERNEL);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}