/*
 * Broadcom NetXtreme-C/E RoCE driver.
 *
 * Description: main component of the bnxt_re driver.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);

static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);

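/* Apply the requested wqe_mode only on Gen P5 chips; everything else is
 * forced to BNXT_QPLIB_WQE_MODE_STATIC.
 */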
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;
	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;
	rdev->qplib_res.dattr = &rdev->dev_attr;
	rdev->qplib_res.is_vf = BNXT_VF(bp);

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	if (bnxt_qplib_determine_atomics(en_dev->pdev))
		ibdev_info(&rdev->ibdev,
			   "platform doesn't support global atomics.");
	return 0;
}

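/* SR-IOV helper: note whether the underlying L2 function is a VF */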
static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

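/* Clamp the PF's context resources to the lower of the driver's
 * compile-time maximums and what the firmware reports as supported.
 */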
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;

	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;

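	/* Reserve a fixed percentage of the resources for the PF and split
	 * the remainder evenly across the requested VFs.
	 */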
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;

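	/* MRW distribution: small pools (< 64K) get the same percentage
	 * split as above; larger pools reserve a fixed MR count for the PF
	 * and divide the rest among the VFs.
	 */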
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

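/* Callbacks registered with the bnxt_en L2 driver via bnxt_re_ulp_ops */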
static void bnxt_re_stop(void *p)
{
	struct bnxt_re_dev *rdev = p;
	struct bnxt *bp;

	if (!rdev)
		return;
	ASSERT_RTNL();

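	/* Called by the L2 driver under rtnl, e.g. during firmware error
	 * recovery; if the firmware is in a fatal state, mark the command
	 * queue detached so nothing waits on it during teardown.
	 */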
	bp = netdev_priv(rdev->netdev);

	ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");

	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);

	bnxt_re_dev_stop(rdev);
	bnxt_re_stop_irq(rdev);

	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return;
	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();

	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
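		/* No new vectors were supplied, so the IRQs cannot be
		 * re-armed; log the failure and bail out.
		 */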
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

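	/* Vectors may have changed; refresh the cached MSI-X table before
	 * re-arming the AEQ and NQ interrupts.
	 */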
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

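/* RoCE <-> net driver glue: ULP (un)registration and MSI-X management */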
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
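		/* Page size is in log2 units */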
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;

	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {};
	struct hwrm_stat_ctx_free_output resp = {};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

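/* Identify bnxt_en-backed netdevs by their ethtool driver name */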
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	en_dev = bnxt_ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.device_group = &bnxt_re_dev_attr_group,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

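	/* Populate the ib_device fields before registering with the IB core */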
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

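	/* Allocate the bnxt_re_dev instance embedded in the ib_device */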
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}

	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	atomic_set(&rdev->ah_count, 0);
	atomic_set(&rdev->pd_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

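	/* For QPs attached to an SRQ, notify the consumer with
	 * IB_EVENT_QP_LAST_WQE_REACHED on the error transition.
	 */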
	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc;

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;

	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
	return 0;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (cq->ib_cq.comp_handler)
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);

	return 0;
}

#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
		rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	/* Only the NQs that were actually enabled need disabling */
	for (i = num_vec_enabled - 1; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

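	/* Configure and allocate resources for qplib */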
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info)
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
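		/* Move all QPs except QP1 and the shadow QP to the error
		 * state, notifying their consumers with IB_EVENT_QP_FATAL.
		 */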
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;

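		/* Only non-VLAN GIDs need updating here; VLAN GIDs already
		 * carry the right setting from when they were added.
		 */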
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2)
				break;
		}
	}
}

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

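	/* Get the priorities configured for RoCE traffic; bail out early
	 * if nothing changed since the last pass.
	 */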
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;

	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}

	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

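	/* When the priority mask toggles between zero and non-zero, the
	 * non-VLAN GID entries must be reprogrammed to pick up the change.
	 */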
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}

static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

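	/* Registration with the L2 driver comes first; it provides the
	 * MSI-X vectors and the HWRM channel used below.
	 */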
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether this function is a VF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

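	/* Establish the RCFW communication channel used to initialize
	 * context memory for the function and all child VFs.
	 */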
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

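	/* Carve out driver resources based on the capabilities the
	 * firmware just reported.
	 */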
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		dev_put(netdev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		/* *rdev is not valid here yet; don't dereference it */
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		dev_put(netdev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}

static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}

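/* Deferred work handler: IB registration and port-event dispatch happen
 * here rather than directly in the netdev notifier, which runs under rtnl.
 */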
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

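/* Handle netdev events for bnxt_en devices: NETDEV_REGISTER adds a new
 * RoCE device, NETDEV_UNREGISTER queues its removal, and everything else
 * is deferred to bnxt_re_task() on the driver workqueue.
 */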
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);