0039 #include <linux/interrupt.h>
0040 #include <linux/types.h>
0041 #include <linux/pci.h>
0042 #include <linux/netdevice.h>
0043 #include <linux/if_ether.h>
0044 #include <net/addrconf.h>
0045
0046 #include <rdma/ib_verbs.h>
0047 #include <rdma/ib_user_verbs.h>
0048 #include <rdma/ib_umem.h>
0049 #include <rdma/ib_addr.h>
0050 #include <rdma/ib_mad.h>
0051 #include <rdma/ib_cache.h>
0052 #include <rdma/uverbs_ioctl.h>
0053
0054 #include "bnxt_ulp.h"
0055
0056 #include "roce_hsi.h"
0057 #include "qplib_res.h"
0058 #include "qplib_sp.h"
0059 #include "qplib_fp.h"
0060 #include "qplib_rcfw.h"
0061
0062 #include "bnxt_re.h"
0063 #include "ib_verbs.h"
0064 #include <rdma/bnxt_re-abi.h>
0065
0066 static int __from_ib_access_flags(int iflags)
0067 {
0068 int qflags = 0;
0069
0070 if (iflags & IB_ACCESS_LOCAL_WRITE)
0071 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
0072 if (iflags & IB_ACCESS_REMOTE_READ)
0073 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
0074 if (iflags & IB_ACCESS_REMOTE_WRITE)
0075 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
0076 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
0077 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
0078 if (iflags & IB_ACCESS_MW_BIND)
0079 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
0080 if (iflags & IB_ZERO_BASED)
0081 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
0082 if (iflags & IB_ACCESS_ON_DEMAND)
0083 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
0084 return qflags;
0085 }
0086
0087 static enum ib_access_flags __to_ib_access_flags(int qflags)
0088 {
0089 enum ib_access_flags iflags = 0;
0090
0091 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
0092 iflags |= IB_ACCESS_LOCAL_WRITE;
0093 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
0094 iflags |= IB_ACCESS_REMOTE_WRITE;
0095 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
0096 iflags |= IB_ACCESS_REMOTE_READ;
0097 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
0098 iflags |= IB_ACCESS_REMOTE_ATOMIC;
0099 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
0100 iflags |= IB_ACCESS_MW_BIND;
0101 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
0102 iflags |= IB_ZERO_BASED;
0103 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
0104 iflags |= IB_ACCESS_ON_DEMAND;
0105 return iflags;
0106 }
0107
0108 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
0109 struct bnxt_qplib_sge *sg_list, int num)
0110 {
0111 int i, total = 0;
0112
0113 for (i = 0; i < num; i++) {
0114 sg_list[i].addr = ib_sg_list[i].addr;
0115 sg_list[i].lkey = ib_sg_list[i].lkey;
0116 sg_list[i].size = ib_sg_list[i].length;
0117 total += sg_list[i].size;
0118 }
0119 return total;
0120 }
0121
0122
0123 int bnxt_re_query_device(struct ib_device *ibdev,
0124 struct ib_device_attr *ib_attr,
0125 struct ib_udata *udata)
0126 {
0127 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
0128 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
0129
0130 memset(ib_attr, 0, sizeof(*ib_attr));
0131 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
0132 min(sizeof(dev_attr->fw_ver),
0133 sizeof(ib_attr->fw_ver)));
0134 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
0135 rdev->netdev->dev_addr);
0136 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
0137 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
0138
0139 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
0140 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
0141 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
0142 ib_attr->max_qp = dev_attr->max_qp;
0143 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
0144 ib_attr->device_cap_flags =
0145 IB_DEVICE_CURR_QP_STATE_MOD
0146 | IB_DEVICE_RC_RNR_NAK_GEN
0147 | IB_DEVICE_SHUTDOWN_PORT
0148 | IB_DEVICE_SYS_IMAGE_GUID
0149 | IB_DEVICE_RESIZE_MAX_WR
0150 | IB_DEVICE_PORT_ACTIVE_EVENT
0151 | IB_DEVICE_N_NOTIFY_CQ
0152 | IB_DEVICE_MEM_WINDOW
0153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
0154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
0155 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
0156 ib_attr->max_send_sge = dev_attr->max_qp_sges;
0157 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
0158 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
0159 ib_attr->max_cq = dev_attr->max_cq;
0160 ib_attr->max_cqe = dev_attr->max_cq_wqes;
0161 ib_attr->max_mr = dev_attr->max_mr;
0162 ib_attr->max_pd = dev_attr->max_pd;
0163 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
0164 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
0165 ib_attr->atomic_cap = IB_ATOMIC_NONE;
0166 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
0167 if (dev_attr->is_atomic) {
0168 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
0169 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
0170 }
0171
0172 ib_attr->max_ee_rd_atom = 0;
0173 ib_attr->max_res_rd_atom = 0;
0174 ib_attr->max_ee_init_rd_atom = 0;
0175 ib_attr->max_ee = 0;
0176 ib_attr->max_rdd = 0;
0177 ib_attr->max_mw = dev_attr->max_mw;
0178 ib_attr->max_raw_ipv6_qp = 0;
0179 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
0180 ib_attr->max_mcast_grp = 0;
0181 ib_attr->max_mcast_qp_attach = 0;
0182 ib_attr->max_total_mcast_qp_attach = 0;
0183 ib_attr->max_ah = dev_attr->max_ah;
0184
0185 ib_attr->max_srq = dev_attr->max_srq;
0186 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
0187 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
0188
0189 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
0190
0191 ib_attr->max_pkeys = 1;
0192 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
0193 return 0;
0194 }
0195
0196
0197 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
0198 struct ib_port_attr *port_attr)
0199 {
0200 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
0201 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
0202
0203 memset(port_attr, 0, sizeof(*port_attr));
0204
0205 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
0206 port_attr->state = IB_PORT_ACTIVE;
0207 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
0208 } else {
0209 port_attr->state = IB_PORT_DOWN;
0210 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
0211 }
0212 port_attr->max_mtu = IB_MTU_4096;
0213 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
0214 port_attr->gid_tbl_len = dev_attr->max_sgid;
0215 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
0216 IB_PORT_DEVICE_MGMT_SUP |
0217 IB_PORT_VENDOR_CLASS_SUP;
0218 port_attr->ip_gids = true;
0219
0220 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
0221 port_attr->bad_pkey_cntr = 0;
0222 port_attr->qkey_viol_cntr = 0;
0223 port_attr->pkey_tbl_len = dev_attr->max_pkey;
0224 port_attr->lid = 0;
0225 port_attr->sm_lid = 0;
0226 port_attr->lmc = 0;
0227 port_attr->max_vl_num = 4;
0228 port_attr->sm_sl = 0;
0229 port_attr->subnet_timeout = 0;
0230 port_attr->init_type_reply = 0;
0231 port_attr->active_speed = rdev->active_speed;
0232 port_attr->active_width = rdev->active_width;
0233
0234 return 0;
0235 }
0236
0237 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
0238 struct ib_port_immutable *immutable)
0239 {
0240 struct ib_port_attr port_attr;
0241
0242 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
0243 return -EINVAL;
0244
0245 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
0246 immutable->gid_tbl_len = port_attr.gid_tbl_len;
0247 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
0248 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
0249 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
0250 return 0;
0251 }
0252
0253 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
0254 {
0255 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
0256
0257 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
0258 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
0259 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
0260 }
0261
0262 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
0263 u16 index, u16 *pkey)
0264 {
0265 if (index > 0)
0266 return -EINVAL;
0267
0268 *pkey = IB_DEFAULT_PKEY_FULL;
0269
0270 return 0;
0271 }
0272
0273 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
0274 int index, union ib_gid *gid)
0275 {
0276 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
0277 int rc = 0;
0278
0279
0280 memset(gid, 0, sizeof(*gid));
0281 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
0282 &rdev->qplib_res.sgid_tbl, index,
0283 (struct bnxt_qplib_gid *)gid);
0284 return rc;
0285 }
0286
0287 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
0288 {
0289 int rc = 0;
0290 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
0291 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
0292 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
0293 struct bnxt_qplib_gid *gid_to_del;
0294 u16 vlan_id = 0xFFFF;
0295
0296
0297 ctx = *context;
0298 if (!ctx)
0299 return -EINVAL;
0300
0301 if (sgid_tbl && sgid_tbl->active) {
0302 if (ctx->idx >= sgid_tbl->max)
0303 return -EINVAL;
0304 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
0305 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
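/*
 * GID index 0 holds the link-local GID that QP1 relies on. If the
 * shadow QP1 is still alive and this is the last reference, refuse
 * the delete so the firmware keeps the entry.
 */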
0314 if (ctx->idx == 0 &&
0315 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
0316 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
0317 ibdev_dbg(&rdev->ibdev,
0318 "Trying to delete GID0 while QP1 is alive\n");
0319 return -EFAULT;
0320 }
0321 ctx->refcnt--;
0322 if (!ctx->refcnt) {
0323 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
0324 vlan_id, true);
0325 if (rc) {
0326 ibdev_err(&rdev->ibdev,
0327 "Failed to remove GID: %#x", rc);
0328 } else {
0329 ctx_tbl = sgid_tbl->ctx;
0330 ctx_tbl[ctx->idx] = NULL;
0331 kfree(ctx);
0332 }
0333 }
0334 } else {
0335 return -EINVAL;
0336 }
0337 return rc;
0338 }
0339
0340 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
0341 {
0342 int rc;
0343 u32 tbl_idx = 0;
0344 u16 vlan_id = 0xFFFF;
0345 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
0346 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
0347 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
0348
0349 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
0350 if (rc)
0351 return rc;
0352
0353 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
0354 rdev->qplib_res.netdev->dev_addr,
0355 vlan_id, true, &tbl_idx);
0356 if (rc == -EALREADY) {
0357 ctx_tbl = sgid_tbl->ctx;
0358 ctx_tbl[tbl_idx]->refcnt++;
0359 *context = ctx_tbl[tbl_idx];
0360 return 0;
0361 }
0362
0363 if (rc < 0) {
0364 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
0365 return rc;
0366 }
0367
0368 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
0369 if (!ctx)
0370 return -ENOMEM;
0371 ctx_tbl = sgid_tbl->ctx;
0372 ctx->idx = tbl_idx;
0373 ctx->refcnt = 1;
0374 ctx_tbl[tbl_idx] = ctx;
0375 *context = ctx;
0376
0377 return rc;
0378 }
0379
0380 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
0381 u32 port_num)
0382 {
0383 return IB_LINK_LAYER_ETHERNET;
0384 }
0385
0386 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
0387
0388 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
0389 {
0390 struct bnxt_re_fence_data *fence = &pd->fence;
0391 struct ib_mr *ib_mr = &fence->mr->ib_mr;
0392 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
0393
0394 memset(wqe, 0, sizeof(*wqe));
0395 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
0396 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
0397 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
0398 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
0399 wqe->bind.zero_based = false;
0400 wqe->bind.parent_l_key = ib_mr->lkey;
0401 wqe->bind.va = (u64)(unsigned long)fence->va;
0402 wqe->bind.length = fence->size;
0403 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
0404 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
0405
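/* Seed the bind rkey from the fence MW; it is incremented again on every bind. */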
0409 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
0410 }
0411
0412 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
0413 {
0414 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
0415 qplib_qp);
0416 struct ib_pd *ib_pd = qp->ib_qp.pd;
0417 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
0418 struct bnxt_re_fence_data *fence = &pd->fence;
0419 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
0420 struct bnxt_qplib_swqe wqe;
0421 int rc;
0422
0423 memcpy(&wqe, fence_wqe, sizeof(wqe));
0424 wqe.bind.r_key = fence->bind_rkey;
0425 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
0426
0427 ibdev_dbg(&qp->rdev->ibdev,
0428 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
0429 wqe.bind.r_key, qp->qplib_qp.id, pd);
0430 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
0431 if (rc) {
0432 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
0433 return rc;
0434 }
0435 bnxt_qplib_post_send_db(&qp->qplib_qp);
0436
0437 return rc;
0438 }
0439
0440 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
0441 {
0442 struct bnxt_re_fence_data *fence = &pd->fence;
0443 struct bnxt_re_dev *rdev = pd->rdev;
0444 struct device *dev = &rdev->en_dev->pdev->dev;
0445 struct bnxt_re_mr *mr = fence->mr;
0446
0447 if (fence->mw) {
0448 bnxt_re_dealloc_mw(fence->mw);
0449 fence->mw = NULL;
0450 }
0451 if (mr) {
0452 if (mr->ib_mr.rkey)
0453 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
0454 true);
0455 if (mr->ib_mr.lkey)
0456 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
0457 kfree(mr);
0458 fence->mr = NULL;
0459 }
0460 if (fence->dma_addr) {
0461 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
0462 DMA_BIDIRECTIONAL);
0463 fence->dma_addr = 0;
0464 }
0465 }
0466
0467 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
0468 {
0469 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
0470 struct bnxt_re_fence_data *fence = &pd->fence;
0471 struct bnxt_re_dev *rdev = pd->rdev;
0472 struct device *dev = &rdev->en_dev->pdev->dev;
0473 struct bnxt_re_mr *mr = NULL;
0474 dma_addr_t dma_addr = 0;
0475 struct ib_mw *mw;
0476 int rc;
0477
0478 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
0479 DMA_BIDIRECTIONAL);
0480 rc = dma_mapping_error(dev, dma_addr);
0481 if (rc) {
0482 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
0483 rc = -EIO;
0484 fence->dma_addr = 0;
0485 goto fail;
0486 }
0487 fence->dma_addr = dma_addr;
0488
0489
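/* Allocate and register an MR that covers the fence buffer. */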
0490 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
0491 if (!mr) {
0492 rc = -ENOMEM;
0493 goto fail;
0494 }
0495 fence->mr = mr;
0496 mr->rdev = rdev;
0497 mr->qplib_mr.pd = &pd->qplib_pd;
0498 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
0499 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
0500 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
0501 if (rc) {
0502 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
0503 goto fail;
0504 }
0505
0506
0507 mr->ib_mr.lkey = mr->qplib_mr.lkey;
0508 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
0509 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
0510 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
0511 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
0512 if (rc) {
0513 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
0514 goto fail;
0515 }
0516 mr->ib_mr.rkey = mr->qplib_mr.rkey;
0517
0518
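/* Create a type-1 memory window; the bind WQE built below ties it to the fence MR. */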
0519 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
0520 if (IS_ERR(mw)) {
0521 ibdev_err(&rdev->ibdev,
0522 "Failed to create fence-MW for PD: %p\n", pd);
0523 rc = PTR_ERR(mw);
0524 goto fail;
0525 }
0526 fence->mw = mw;
0527
0528 bnxt_re_create_fence_wqe(pd);
0529 return 0;
0530
0531 fail:
0532 bnxt_re_destroy_fence_mr(pd);
0533 return rc;
0534 }
0535
0536
0537 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
0538 {
0539 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
0540 struct bnxt_re_dev *rdev = pd->rdev;
0541
0542 bnxt_re_destroy_fence_mr(pd);
0543
0544 if (pd->qplib_pd.id) {
0545 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
0546 &rdev->qplib_res.pd_tbl,
0547 &pd->qplib_pd))
0548 atomic_dec(&rdev->pd_count);
0549 }
0550 return 0;
0551 }
0552
0553 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
0554 {
0555 struct ib_device *ibdev = ibpd->device;
0556 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
0557 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
0558 udata, struct bnxt_re_ucontext, ib_uctx);
0559 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
0560 int rc;
0561
0562 pd->rdev = rdev;
0563 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
0564 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
0565 rc = -ENOMEM;
0566 goto fail;
0567 }
0568
0569 if (udata) {
0570 struct bnxt_re_pd_resp resp;
0571
0572 if (!ucntx->dpi.dbr) {
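/*
 * Allocate the doorbell page (DPI) for this user context on first PD
 * allocation, so that query-only applications do not consume DPIs.
 */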
0577 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
0578 &ucntx->dpi, ucntx)) {
0579 rc = -ENOMEM;
0580 goto dbfail;
0581 }
0582 }
0583
0584 resp.pdid = pd->qplib_pd.id;
0585
0586 resp.dpi = ucntx->dpi.dpi;
0587 resp.dbr = (u64)ucntx->dpi.umdbr;
0588
0589 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
0590 if (rc) {
0591 ibdev_err(&rdev->ibdev,
0592 "Failed to copy user response\n");
0593 goto dbfail;
0594 }
0595 }
0596
0597 if (!udata)
0598 if (bnxt_re_create_fence_mr(pd))
0599 ibdev_warn(&rdev->ibdev,
0600 "Failed to create Fence-MR\n");
0601 atomic_inc(&rdev->pd_count);
0602
0603 return 0;
0604 dbfail:
0605 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
0606 &pd->qplib_pd);
0607 fail:
0608 return rc;
0609 }
0610
0611
0612 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
0613 {
0614 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
0615 struct bnxt_re_dev *rdev = ah->rdev;
0616
0617 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
0618 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
0619 atomic_dec(&rdev->ah_count);
0620
0621 return 0;
0622 }
0623
0624 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
0625 {
0626 u8 nw_type;
0627
0628 switch (ntype) {
0629 case RDMA_NETWORK_IPV4:
0630 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
0631 break;
0632 case RDMA_NETWORK_IPV6:
0633 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
0634 break;
0635 default:
0636 nw_type = CMDQ_CREATE_AH_TYPE_V1;
0637 break;
0638 }
0639 return nw_type;
0640 }
0641
0642 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
0643 struct ib_udata *udata)
0644 {
0645 struct ib_pd *ib_pd = ib_ah->pd;
0646 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
0647 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
0648 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
0649 struct bnxt_re_dev *rdev = pd->rdev;
0650 const struct ib_gid_attr *sgid_attr;
0651 struct bnxt_re_gid_ctx *ctx;
0652 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
0653 u8 nw_type;
0654 int rc;
0655
0656 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
0657 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
0658 return -EINVAL;
0659 }
0660
0661 ah->rdev = rdev;
0662 ah->qplib_ah.pd = &pd->qplib_pd;
0663
0664
0665 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
0666 sizeof(union ib_gid));
0667 sgid_attr = grh->sgid_attr;
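/* Look up the driver's GID context to get the HW SGID table index. */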
0671 ctx = rdma_read_gid_hw_context(sgid_attr);
0672 ah->qplib_ah.sgid_index = ctx->idx;
0673 ah->qplib_ah.host_sgid_index = grh->sgid_index;
0674 ah->qplib_ah.traffic_class = grh->traffic_class;
0675 ah->qplib_ah.flow_label = grh->flow_label;
0676 ah->qplib_ah.hop_limit = grh->hop_limit;
0677 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
0678
0679
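/* Derive the network type (RoCE v1, v2/IPv4, v2/IPv6) from the source GID. */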
0680 nw_type = rdma_gid_attr_network_type(sgid_attr);
0681 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
0682
0683 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
0684 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
0685 !(init_attr->flags &
0686 RDMA_CREATE_AH_SLEEPABLE));
0687 if (rc) {
0688 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
0689 return rc;
0690 }
0691
0692
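/* Publish the AH id (AVID) to the shared page so userspace can read it. */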
0693 if (udata) {
0694 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
0695 udata, struct bnxt_re_ucontext, ib_uctx);
0696 unsigned long flag;
0697 u32 *wrptr;
0698
0699 spin_lock_irqsave(&uctx->sh_lock, flag);
0700 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
0701 *wrptr = ah->qplib_ah.id;
0702 wmb(); /* make sure the AVID write reaches the shared page before unlocking */
0703 spin_unlock_irqrestore(&uctx->sh_lock, flag);
0704 }
0705 atomic_inc(&rdev->ah_count);
0706
0707 return 0;
0708 }
0709
0710 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
0711 {
0712 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
0713
0714 ah_attr->type = ib_ah->type;
0715 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
0716 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
0717 rdma_ah_set_grh(ah_attr, NULL, 0,
0718 ah->qplib_ah.host_sgid_index,
0719 0, ah->qplib_ah.traffic_class);
0720 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
0721 rdma_ah_set_port_num(ah_attr, 1);
0722 rdma_ah_set_static_rate(ah_attr, 0);
0723 return 0;
0724 }
0725
0726 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
0727 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
0728 {
0729 unsigned long flags;
0730
0731 spin_lock_irqsave(&qp->scq->cq_lock, flags);
0732 if (qp->rcq != qp->scq)
0733 spin_lock(&qp->rcq->cq_lock);
0734 else
0735 __acquire(&qp->rcq->cq_lock);
0736
0737 return flags;
0738 }
0739
0740 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
0741 unsigned long flags)
0742 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
0743 {
0744 if (qp->rcq != qp->scq)
0745 spin_unlock(&qp->rcq->cq_lock);
0746 else
0747 __release(&qp->rcq->cq_lock);
0748 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
0749 }
0750
0751 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
0752 {
0753 struct bnxt_re_qp *gsi_sqp;
0754 struct bnxt_re_ah *gsi_sah;
0755 struct bnxt_re_dev *rdev;
0756 int rc = 0;
0757
0758 rdev = qp->rdev;
0759 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
0760 gsi_sah = rdev->gsi_ctx.gsi_sah;
0761
0762 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
0763 bnxt_qplib_destroy_ah(&rdev->qplib_res,
0764 &gsi_sah->qplib_ah,
0765 true);
0766 atomic_dec(&rdev->ah_count);
0767 bnxt_qplib_clean_qp(&qp->qplib_qp);
0768
0769 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
0770 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
0771 if (rc) {
0772 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
0773 goto fail;
0774 }
0775 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
0776
0777
0778 mutex_lock(&rdev->qp_lock);
0779 list_del(&gsi_sqp->list);
0780 mutex_unlock(&rdev->qp_lock);
0781 atomic_dec(&rdev->qp_count);
0782
0783 kfree(rdev->gsi_ctx.sqp_tbl);
0784 kfree(gsi_sah);
0785 kfree(gsi_sqp);
0786 rdev->gsi_ctx.gsi_sqp = NULL;
0787 rdev->gsi_ctx.gsi_sah = NULL;
0788 rdev->gsi_ctx.sqp_tbl = NULL;
0789
0790 return 0;
0791 fail:
0792 return rc;
0793 }
0794
0795
0796 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
0797 {
0798 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
0799 struct bnxt_re_dev *rdev = qp->rdev;
0800 unsigned int flags;
0801 int rc;
0802
0803 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
0804
0805 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
0806 if (rc) {
0807 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
0808 return rc;
0809 }
0810
0811 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
0812 flags = bnxt_re_lock_cqs(qp);
0813 bnxt_qplib_clean_qp(&qp->qplib_qp);
0814 bnxt_re_unlock_cqs(qp, flags);
0815 }
0816
0817 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
0818
0819 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
0820 rc = bnxt_re_destroy_gsi_sqp(qp);
0821 if (rc)
0822 return rc;
0823 }
0824
0825 mutex_lock(&rdev->qp_lock);
0826 list_del(&qp->list);
0827 mutex_unlock(&rdev->qp_lock);
0828 atomic_dec(&rdev->qp_count);
0829
0830 ib_umem_release(qp->rumem);
0831 ib_umem_release(qp->sumem);
0832
0833 return 0;
0834 }
0835
0836 static u8 __from_ib_qp_type(enum ib_qp_type type)
0837 {
0838 switch (type) {
0839 case IB_QPT_GSI:
0840 return CMDQ_CREATE_QP1_TYPE_GSI;
0841 case IB_QPT_RC:
0842 return CMDQ_CREATE_QP_TYPE_RC;
0843 case IB_QPT_UD:
0844 return CMDQ_CREATE_QP_TYPE_UD;
0845 default:
0846 return IB_QPT_MAX;
0847 }
0848 }
0849
0850 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
0851 int rsge, int max)
0852 {
0853 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
0854 rsge = max;
0855 return bnxt_re_get_rwqe_size(rsge);
0856 }
0857
0858 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
0859 {
0860 u16 wqe_size, calc_ils;
0861
0862 wqe_size = bnxt_re_get_swqe_size(nsge);
0863 if (ilsize) {
0864 calc_ils = sizeof(struct sq_send_hdr) + ilsize;
0865 wqe_size = max_t(u16, calc_ils, wqe_size);
0866 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
0867 }
0868 return wqe_size;
0869 }
0870
0871 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
0872 struct ib_qp_init_attr *init_attr)
0873 {
0874 struct bnxt_qplib_dev_attr *dev_attr;
0875 struct bnxt_qplib_qp *qplqp;
0876 struct bnxt_re_dev *rdev;
0877 struct bnxt_qplib_q *sq;
0878 int align, ilsize;
0879
0880 rdev = qp->rdev;
0881 qplqp = &qp->qplib_qp;
0882 sq = &qplqp->sq;
0883 dev_attr = &rdev->dev_attr;
0884
0885 align = sizeof(struct sq_send_hdr);
0886 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
0887
0888 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
0889 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
0890 return -EINVAL;
0891
0892
0893
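/* In static WQE mode the send WQE size is fixed at the maximum supported size. */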
0894 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
0895 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
0896 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
0897
0898 if (init_attr->cap.max_inline_data) {
0899 qplqp->max_inline_data = sq->wqe_size -
0900 sizeof(struct sq_send_hdr);
0901 init_attr->cap.max_inline_data = qplqp->max_inline_data;
0902 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
0903 sq->max_sge = qplqp->max_inline_data /
0904 sizeof(struct sq_sge);
0905 }
0906
0907 return 0;
0908 }
0909
0910 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
0911 struct bnxt_re_qp *qp, struct ib_udata *udata)
0912 {
0913 struct bnxt_qplib_qp *qplib_qp;
0914 struct bnxt_re_ucontext *cntx;
0915 struct bnxt_re_qp_req ureq;
0916 int bytes = 0, psn_sz;
0917 struct ib_umem *umem;
0918 int psn_nume;
0919
0920 qplib_qp = &qp->qplib_qp;
0921 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
0922 ib_uctx);
0923 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
0924 return -EFAULT;
0925
0926 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
0927
0928 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
0929 psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
0930 sizeof(struct sq_psn_search_ext) :
0931 sizeof(struct sq_psn_search);
0932 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
0933 qplib_qp->sq.max_wqe :
0934 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
0935 sizeof(struct bnxt_qplib_sge));
0936 bytes += (psn_nume * psn_sz);
0937 }
0938
0939 bytes = PAGE_ALIGN(bytes);
0940 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
0941 IB_ACCESS_LOCAL_WRITE);
0942 if (IS_ERR(umem))
0943 return PTR_ERR(umem);
0944
0945 qp->sumem = umem;
0946 qplib_qp->sq.sg_info.umem = umem;
0947 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
0948 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
0949 qplib_qp->qp_handle = ureq.qp_handle;
0950
0951 if (!qp->qplib_qp.srq) {
0952 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
0953 bytes = PAGE_ALIGN(bytes);
0954 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
0955 IB_ACCESS_LOCAL_WRITE);
0956 if (IS_ERR(umem))
0957 goto rqfail;
0958 qp->rumem = umem;
0959 qplib_qp->rq.sg_info.umem = umem;
0960 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
0961 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
0962 }
0963
0964 qplib_qp->dpi = &cntx->dpi;
0965 return 0;
0966 rqfail:
0967 ib_umem_release(qp->sumem);
0968 qp->sumem = NULL;
0969 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
0970
0971 return PTR_ERR(umem);
0972 }
0973
0974 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
0975 (struct bnxt_re_pd *pd,
0976 struct bnxt_qplib_res *qp1_res,
0977 struct bnxt_qplib_qp *qp1_qp)
0978 {
0979 struct bnxt_re_dev *rdev = pd->rdev;
0980 struct bnxt_re_ah *ah;
0981 union ib_gid sgid;
0982 int rc;
0983
0984 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
0985 if (!ah)
0986 return NULL;
0987
0988 ah->rdev = rdev;
0989 ah->qplib_ah.pd = &pd->qplib_pd;
0990
0991 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
0992 if (rc)
0993 goto fail;
0994
0995
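/* Use the local GID as the destination: the shadow AH points back at this port. */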
0996 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
0997 sizeof(union ib_gid));
0998 ah->qplib_ah.sgid_index = 0;
0999
1000 ah->qplib_ah.traffic_class = 0;
1001 ah->qplib_ah.flow_label = 0;
1002 ah->qplib_ah.hop_limit = 1;
1003 ah->qplib_ah.sl = 0;
1004
1005 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1006
1007 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1008 if (rc) {
1009 ibdev_err(&rdev->ibdev,
1010 "Failed to allocate HW AH for Shadow QP");
1011 goto fail;
1012 }
1013 atomic_inc(&rdev->ah_count);
1014
1015 return ah;
1016
1017 fail:
1018 kfree(ah);
1019 return NULL;
1020 }
1021
1022 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1023 (struct bnxt_re_pd *pd,
1024 struct bnxt_qplib_res *qp1_res,
1025 struct bnxt_qplib_qp *qp1_qp)
1026 {
1027 struct bnxt_re_dev *rdev = pd->rdev;
1028 struct bnxt_re_qp *qp;
1029 int rc;
1030
1031 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1032 if (!qp)
1033 return NULL;
1034
1035 qp->rdev = rdev;
1036
1037
1038 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1039
1040 qp->qplib_qp.pd = &pd->qplib_pd;
1041 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1042 qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;
1043
1044 qp->qplib_qp.max_inline_data = 0;
1045 qp->qplib_qp.sig_type = true;
1046
1047
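/* The shadow QP's SQ depth mirrors QP1's RQ depth so every received packet can be relayed. */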
1048 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1049 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1050 qp->qplib_qp.sq.max_sge = 2;
1051
1052 qp->qplib_qp.sq.q_full_delta = 1;
1053 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1054 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1055
1056 qp->qplib_qp.scq = qp1_qp->scq;
1057 qp->qplib_qp.rcq = qp1_qp->rcq;
1058
1059 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1060 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1061 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1062
1063 qp->qplib_qp.rq.q_full_delta = 1;
1064 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1065 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1066
1067 qp->qplib_qp.mtu = qp1_qp->mtu;
1068
1069 qp->qplib_qp.sq_hdr_buf_size = 0;
1070 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1071 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1072
1073 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1074 if (rc)
1075 goto fail;
1076
1077 spin_lock_init(&qp->sq_lock);
1078 INIT_LIST_HEAD(&qp->list);
1079 mutex_lock(&rdev->qp_lock);
1080 list_add_tail(&qp->list, &rdev->qp_list);
1081 atomic_inc(&rdev->qp_count);
1082 mutex_unlock(&rdev->qp_lock);
1083 return qp;
1084 fail:
1085 kfree(qp);
1086 return NULL;
1087 }
1088
1089 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1090 struct ib_qp_init_attr *init_attr)
1091 {
1092 struct bnxt_qplib_dev_attr *dev_attr;
1093 struct bnxt_qplib_qp *qplqp;
1094 struct bnxt_re_dev *rdev;
1095 struct bnxt_qplib_q *rq;
1096 int entries;
1097
1098 rdev = qp->rdev;
1099 qplqp = &qp->qplib_qp;
1100 rq = &qplqp->rq;
1101 dev_attr = &rdev->dev_attr;
1102
1103 if (init_attr->srq) {
1104 struct bnxt_re_srq *srq;
1105
1106 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1107 qplqp->srq = &srq->qplib_srq;
1108 rq->max_wqe = 0;
1109 } else {
1110 rq->max_sge = init_attr->cap.max_recv_sge;
1111 if (rq->max_sge > dev_attr->max_qp_sges)
1112 rq->max_sge = dev_attr->max_qp_sges;
1113 init_attr->cap.max_recv_sge = rq->max_sge;
1114 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1115 dev_attr->max_qp_sges);
1116
1117
1118
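/* Allocate one entry more than requested so posting the maximum never looks empty. */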
1119 entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
1120 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1121 rq->q_full_delta = 0;
1122 rq->sg_info.pgsize = PAGE_SIZE;
1123 rq->sg_info.pgshft = PAGE_SHIFT;
1124 }
1125
1126 return 0;
1127 }
1128
1129 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1130 {
1131 struct bnxt_qplib_dev_attr *dev_attr;
1132 struct bnxt_qplib_qp *qplqp;
1133 struct bnxt_re_dev *rdev;
1134
1135 rdev = qp->rdev;
1136 qplqp = &qp->qplib_qp;
1137 dev_attr = &rdev->dev_attr;
1138
1139 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
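/* Non gen-P5 chips use a fixed 6-SGE receive WQE layout for the GSI QP. */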
1140 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1141 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1142 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1143 qplqp->rq.max_sge = 6;
1144 }
1145 }
1146
1147 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1148 struct ib_qp_init_attr *init_attr,
1149 struct ib_udata *udata)
1150 {
1151 struct bnxt_qplib_dev_attr *dev_attr;
1152 struct bnxt_qplib_qp *qplqp;
1153 struct bnxt_re_dev *rdev;
1154 struct bnxt_qplib_q *sq;
1155 int entries;
1156 int diff;
1157 int rc;
1158
1159 rdev = qp->rdev;
1160 qplqp = &qp->qplib_qp;
1161 sq = &qplqp->sq;
1162 dev_attr = &rdev->dev_attr;
1163
1164 sq->max_sge = init_attr->cap.max_send_sge;
1165 if (sq->max_sge > dev_attr->max_qp_sges) {
1166 sq->max_sge = dev_attr->max_qp_sges;
1167 init_attr->cap.max_send_sge = sq->max_sge;
1168 }
1169
1170 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1171 if (rc)
1172 return rc;
1173
1174 entries = init_attr->cap.max_send_wr;
1175
1176 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1177 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1178 entries = roundup_pow_of_two(entries + diff + 1);
1179 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1180 sq->q_full_delta = diff + 1;
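/* One slot is reserved for internal use; take it out of the queue-full delta. */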
1186 qplqp->sq.q_full_delta -= 1;
1187 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1188 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1189
1190 return 0;
1191 }
1192
1193 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1194 struct ib_qp_init_attr *init_attr)
1195 {
1196 struct bnxt_qplib_dev_attr *dev_attr;
1197 struct bnxt_qplib_qp *qplqp;
1198 struct bnxt_re_dev *rdev;
1199 int entries;
1200
1201 rdev = qp->rdev;
1202 qplqp = &qp->qplib_qp;
1203 dev_attr = &rdev->dev_attr;
1204
1205 if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
1206 entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
1207 qplqp->sq.max_wqe = min_t(u32, entries,
1208 dev_attr->max_qp_wqes + 1);
1209 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1210 init_attr->cap.max_send_wr;
1211 qplqp->sq.max_sge++;
1212 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1213 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1214 }
1215 }
1216
1217 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1218 struct ib_qp_init_attr *init_attr)
1219 {
1220 struct bnxt_qplib_chip_ctx *chip_ctx;
1221 int qptype;
1222
1223 chip_ctx = rdev->chip_ctx;
1224
1225 qptype = __from_ib_qp_type(init_attr->qp_type);
1226 if (qptype == IB_QPT_MAX) {
1227 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", init_attr->qp_type);
1228 qptype = -EOPNOTSUPP;
1229 goto out;
1230 }
1231
1232 if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
1233 init_attr->qp_type == IB_QPT_GSI)
1234 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1235 out:
1236 return qptype;
1237 }
1238
1239 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1240 struct ib_qp_init_attr *init_attr,
1241 struct ib_udata *udata)
1242 {
1243 struct bnxt_qplib_dev_attr *dev_attr;
1244 struct bnxt_qplib_qp *qplqp;
1245 struct bnxt_re_dev *rdev;
1246 struct bnxt_re_cq *cq;
1247 int rc = 0, qptype;
1248
1249 rdev = qp->rdev;
1250 qplqp = &qp->qplib_qp;
1251 dev_attr = &rdev->dev_attr;
1252
1253
1254 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1255 qplqp->pd = &pd->qplib_pd;
1256 qplqp->qp_handle = (u64)qplqp;
1257 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1258 qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
1259 true : false);
1260 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1261 if (qptype < 0) {
1262 rc = qptype;
1263 goto out;
1264 }
1265 qplqp->type = (u8)qptype;
1266 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1267
1268 if (init_attr->qp_type == IB_QPT_RC) {
1269 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1270 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1271 }
1272 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1273 qplqp->dpi = &rdev->dpi_privileged;
1274 if (init_attr->create_flags) {
1275 ibdev_dbg(&rdev->ibdev,
1276 "QP create flags 0x%x not supported",
1277 init_attr->create_flags);
1278 return -EOPNOTSUPP;
1279 }
1280
1281
1282 if (init_attr->send_cq) {
1283 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1284 qplqp->scq = &cq->qplib_cq;
1285 qp->scq = cq;
1286 }
1287
1288 if (init_attr->recv_cq) {
1289 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1290 qplqp->rcq = &cq->qplib_cq;
1291 qp->rcq = cq;
1292 }
1293
1294
1295 rc = bnxt_re_init_rq_attr(qp, init_attr);
1296 if (rc)
1297 goto out;
1298 if (init_attr->qp_type == IB_QPT_GSI)
1299 bnxt_re_adjust_gsi_rq_attr(qp);
1300
1301
1302 rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
1303 if (rc)
1304 goto out;
1305 if (init_attr->qp_type == IB_QPT_GSI)
1306 bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
1307
1308 if (udata)
1309 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1310 out:
1311 return rc;
1312 }
1313
1314 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1315 struct bnxt_re_pd *pd)
1316 {
1317 struct bnxt_re_sqp_entries *sqp_tbl;
1318 struct bnxt_re_dev *rdev;
1319 struct bnxt_re_qp *sqp;
1320 struct bnxt_re_ah *sah;
1321 int rc = 0;
1322
1323 rdev = qp->rdev;
1324
1325 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1326 GFP_KERNEL);
1327 if (!sqp_tbl)
1328 return -ENOMEM;
1329 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1330
1331 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1332 if (!sqp) {
1333 rc = -ENODEV;
1334 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1335 goto out;
1336 }
1337 rdev->gsi_ctx.gsi_sqp = sqp;
1338
1339 sqp->rcq = qp->rcq;
1340 sqp->scq = qp->scq;
1341 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1342 &qp->qplib_qp);
1343 if (!sah) {
1344 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1345 &sqp->qplib_qp);
1346 rc = -ENODEV;
1347 ibdev_err(&rdev->ibdev,
1348 "Failed to create AH entry for ShadowQP");
1349 goto out;
1350 }
1351 rdev->gsi_ctx.gsi_sah = sah;
1352
1353 return 0;
1354 out:
1355 kfree(sqp_tbl);
1356 return rc;
1357 }
1358
1359 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1360 struct ib_qp_init_attr *init_attr)
1361 {
1362 struct bnxt_re_dev *rdev;
1363 struct bnxt_qplib_qp *qplqp;
1364 int rc = 0;
1365
1366 rdev = qp->rdev;
1367 qplqp = &qp->qplib_qp;
1368
1369 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1370 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1371
1372 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1373 if (rc) {
1374 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1375 goto out;
1376 }
1377
1378 rc = bnxt_re_create_shadow_gsi(qp, pd);
1379 out:
1380 return rc;
1381 }
1382
1383 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1384 struct ib_qp_init_attr *init_attr,
1385 struct bnxt_qplib_dev_attr *dev_attr)
1386 {
1387 bool rc = true;
1388
1389 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1390 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1391 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1392 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1393 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1394 ibdev_err(&rdev->ibdev,
1395 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1396 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1397 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1398 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1399 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1400 init_attr->cap.max_inline_data,
1401 dev_attr->max_inline_data);
1402 rc = false;
1403 }
1404 return rc;
1405 }
1406
1407 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1408 struct ib_udata *udata)
1409 {
1410 struct ib_pd *ib_pd = ib_qp->pd;
1411 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1412 struct bnxt_re_dev *rdev = pd->rdev;
1413 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1414 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1415 int rc;
1416
1417 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1418 if (!rc) {
1419 rc = -EINVAL;
1420 goto fail;
1421 }
1422
1423 qp->rdev = rdev;
1424 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1425 if (rc)
1426 goto fail;
1427
1428 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1429 !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
1430 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1431 if (rc == -ENODEV)
1432 goto qp_destroy;
1433 if (rc)
1434 goto fail;
1435 } else {
1436 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1437 if (rc) {
1438 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1439 goto free_umem;
1440 }
1441 if (udata) {
1442 struct bnxt_re_qp_resp resp;
1443
1444 resp.qpid = qp->qplib_qp.id;
1445 resp.rsvd = 0;
1446 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1447 if (rc) {
1448 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1449 goto qp_destroy;
1450 }
1451 }
1452 }
1453
1454 qp->ib_qp.qp_num = qp->qplib_qp.id;
1455 if (qp_init_attr->qp_type == IB_QPT_GSI)
1456 rdev->gsi_ctx.gsi_qp = qp;
1457 spin_lock_init(&qp->sq_lock);
1458 spin_lock_init(&qp->rq_lock);
1459 INIT_LIST_HEAD(&qp->list);
1460 mutex_lock(&rdev->qp_lock);
1461 list_add_tail(&qp->list, &rdev->qp_list);
1462 mutex_unlock(&rdev->qp_lock);
1463 atomic_inc(&rdev->qp_count);
1464
1465 return 0;
1466 qp_destroy:
1467 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1468 free_umem:
1469 ib_umem_release(qp->rumem);
1470 ib_umem_release(qp->sumem);
1471 fail:
1472 return rc;
1473 }
1474
1475 static u8 __from_ib_qp_state(enum ib_qp_state state)
1476 {
1477 switch (state) {
1478 case IB_QPS_RESET:
1479 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1480 case IB_QPS_INIT:
1481 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1482 case IB_QPS_RTR:
1483 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1484 case IB_QPS_RTS:
1485 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1486 case IB_QPS_SQD:
1487 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1488 case IB_QPS_SQE:
1489 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1490 case IB_QPS_ERR:
1491 default:
1492 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1493 }
1494 }
1495
1496 static enum ib_qp_state __to_ib_qp_state(u8 state)
1497 {
1498 switch (state) {
1499 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1500 return IB_QPS_RESET;
1501 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1502 return IB_QPS_INIT;
1503 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1504 return IB_QPS_RTR;
1505 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1506 return IB_QPS_RTS;
1507 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1508 return IB_QPS_SQD;
1509 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1510 return IB_QPS_SQE;
1511 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1512 default:
1513 return IB_QPS_ERR;
1514 }
1515 }
1516
1517 static u32 __from_ib_mtu(enum ib_mtu mtu)
1518 {
1519 switch (mtu) {
1520 case IB_MTU_256:
1521 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1522 case IB_MTU_512:
1523 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1524 case IB_MTU_1024:
1525 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1526 case IB_MTU_2048:
1527 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1528 case IB_MTU_4096:
1529 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1530 default:
1531 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1532 }
1533 }
1534
1535 static enum ib_mtu __to_ib_mtu(u32 mtu)
1536 {
1537 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1538 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1539 return IB_MTU_256;
1540 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1541 return IB_MTU_512;
1542 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1543 return IB_MTU_1024;
1544 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1545 return IB_MTU_2048;
1546 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1547 return IB_MTU_4096;
1548 default:
1549 return IB_MTU_2048;
1550 }
1551 }
1552
1553
1554 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1555 {
1556 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1557 ib_srq);
1558 struct bnxt_re_dev *rdev = srq->rdev;
1559 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1560 struct bnxt_qplib_nq *nq = NULL;
1561
1562 if (qplib_srq->cq)
1563 nq = qplib_srq->cq->nq;
1564 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1565 ib_umem_release(srq->umem);
1566 atomic_dec(&rdev->srq_count);
1567 if (nq)
1568 nq->budget--;
1569 return 0;
1570 }
1571
1572 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1573 struct bnxt_re_pd *pd,
1574 struct bnxt_re_srq *srq,
1575 struct ib_udata *udata)
1576 {
1577 struct bnxt_re_srq_req ureq;
1578 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1579 struct ib_umem *umem;
1580 int bytes = 0;
1581 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1582 udata, struct bnxt_re_ucontext, ib_uctx);
1583
1584 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1585 return -EFAULT;
1586
1587 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1588 bytes = PAGE_ALIGN(bytes);
1589 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1590 IB_ACCESS_LOCAL_WRITE);
1591 if (IS_ERR(umem))
1592 return PTR_ERR(umem);
1593
1594 srq->umem = umem;
1595 qplib_srq->sg_info.umem = umem;
1596 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1597 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1598 qplib_srq->srq_handle = ureq.srq_handle;
1599 qplib_srq->dpi = &cntx->dpi;
1600
1601 return 0;
1602 }
1603
1604 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1605 struct ib_srq_init_attr *srq_init_attr,
1606 struct ib_udata *udata)
1607 {
1608 struct bnxt_qplib_dev_attr *dev_attr;
1609 struct bnxt_qplib_nq *nq = NULL;
1610 struct bnxt_re_dev *rdev;
1611 struct bnxt_re_srq *srq;
1612 struct bnxt_re_pd *pd;
1613 struct ib_pd *ib_pd;
1614 int rc, entries;
1615
1616 ib_pd = ib_srq->pd;
1617 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1618 rdev = pd->rdev;
1619 dev_attr = &rdev->dev_attr;
1620 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1621
1622 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1623 ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1624 rc = -EINVAL;
1625 goto exit;
1626 }
1627
1628 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1629 rc = -EOPNOTSUPP;
1630 goto exit;
1631 }
1632
1633 srq->rdev = rdev;
1634 srq->qplib_srq.pd = &pd->qplib_pd;
1635 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1636
1637
1638
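/* Allocate one entry more than requested so posting the maximum never looks empty. */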
1639 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1640 if (entries > dev_attr->max_srq_wqes + 1)
1641 entries = dev_attr->max_srq_wqes + 1;
1642 srq->qplib_srq.max_wqe = entries;
1643
1644 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1645
1646 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1647 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1648 srq->srq_limit = srq_init_attr->attr.srq_limit;
1649 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1650 nq = &rdev->nq[0];
1651
1652 if (udata) {
1653 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1654 if (rc)
1655 goto fail;
1656 }
1657
1658 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1659 if (rc) {
1660 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1661 goto fail;
1662 }
1663
1664 if (udata) {
1665 struct bnxt_re_srq_resp resp;
1666
1667 resp.srqid = srq->qplib_srq.id;
1668 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1669 if (rc) {
1670 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1671 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1672 &srq->qplib_srq);
1673 goto fail;
1674 }
1675 }
1676 if (nq)
1677 nq->budget++;
1678 atomic_inc(&rdev->srq_count);
1679 spin_lock_init(&srq->lock);
1680
1681 return 0;
1682
1683 fail:
1684 ib_umem_release(srq->umem);
1685 exit:
1686 return rc;
1687 }
1688
1689 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1690 enum ib_srq_attr_mask srq_attr_mask,
1691 struct ib_udata *udata)
1692 {
1693 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1694 ib_srq);
1695 struct bnxt_re_dev *rdev = srq->rdev;
1696 int rc;
1697
1698 switch (srq_attr_mask) {
1699 case IB_SRQ_MAX_WR:
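/* SRQ resize is not supported. */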
1700
1701 break;
1702 case IB_SRQ_LIMIT:
1703
1704 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1705 return -EINVAL;
1706
1707 srq->qplib_srq.threshold = srq_attr->srq_limit;
1708 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1709 if (rc) {
1710 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1711 return rc;
1712 }
1713
1714 srq->srq_limit = srq_attr->srq_limit;
1715
1716 break;
1717 default:
1718 ibdev_err(&rdev->ibdev,
1719 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1720 return -EINVAL;
1721 }
1722 return 0;
1723 }
1724
1725 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1726 {
1727 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1728 ib_srq);
1729 struct bnxt_re_srq tsrq;
1730 struct bnxt_re_dev *rdev = srq->rdev;
1731 int rc;
1732
1733
1734 tsrq.qplib_srq.id = srq->qplib_srq.id;
1735 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1736 if (rc) {
1737 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1738 return rc;
1739 }
1740 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1741 srq_attr->max_sge = srq->qplib_srq.max_sge;
1742 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1743
1744 return 0;
1745 }
1746
1747 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1748 const struct ib_recv_wr **bad_wr)
1749 {
1750 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1751 ib_srq);
1752 struct bnxt_qplib_swqe wqe;
1753 unsigned long flags;
1754 int rc = 0;
1755
1756 spin_lock_irqsave(&srq->lock, flags);
1757 while (wr) {
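/* Translate each ib_recv_wr into a qplib WQE and post it to the SRQ. */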
1758
1759 wqe.num_sge = wr->num_sge;
1760 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1761 wqe.wr_id = wr->wr_id;
1762 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1763
1764 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1765 if (rc) {
1766 *bad_wr = wr;
1767 break;
1768 }
1769 wr = wr->next;
1770 }
1771 spin_unlock_irqrestore(&srq->lock, flags);
1772
1773 return rc;
1774 }
1775 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1776 struct bnxt_re_qp *qp1_qp,
1777 int qp_attr_mask)
1778 {
1779 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1780 int rc = 0;
1781
1782 if (qp_attr_mask & IB_QP_STATE) {
1783 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1784 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1785 }
1786 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1787 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1788 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1789 }
1790
1791 if (qp_attr_mask & IB_QP_QKEY) {
1792 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1793
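/* The shadow QP always uses a fixed, driver-chosen QKEY. */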
1794 qp->qplib_qp.qkey = 0x81818181;
1795 }
1796 if (qp_attr_mask & IB_QP_SQ_PSN) {
1797 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1798 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1799 }
1800
1801 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1802 if (rc)
1803 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1804 return rc;
1805 }
1806
1807 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1808 int qp_attr_mask, struct ib_udata *udata)
1809 {
1810 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1811 struct bnxt_re_dev *rdev = qp->rdev;
1812 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1813 enum ib_qp_state curr_qp_state, new_qp_state;
1814 int rc, entries;
1815 unsigned int flags;
1816 u8 nw_type;
1817
1818 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1819 return -EOPNOTSUPP;
1820
1821 qp->qplib_qp.modify_flags = 0;
1822 if (qp_attr_mask & IB_QP_STATE) {
1823 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1824 new_qp_state = qp_attr->qp_state;
1825 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1826 ib_qp->qp_type, qp_attr_mask)) {
1827 ibdev_err(&rdev->ibdev,
1828 "Invalid attribute mask: %#x specified ",
1829 qp_attr_mask);
1830 ibdev_err(&rdev->ibdev,
1831 "for qpn: %#x type: %#x",
1832 ib_qp->qp_num, ib_qp->qp_type);
1833 ibdev_err(&rdev->ibdev,
1834 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1835 curr_qp_state, new_qp_state);
1836 return -EINVAL;
1837 }
1838 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1839 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1840
1841 if (!qp->sumem &&
1842 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1843 ibdev_dbg(&rdev->ibdev,
1844 "Move QP = %p to flush list\n", qp);
1845 flags = bnxt_re_lock_cqs(qp);
1846 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1847 bnxt_re_unlock_cqs(qp, flags);
1848 }
1849 if (!qp->sumem &&
1850 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1851 ibdev_dbg(&rdev->ibdev,
1852 "Move QP = %p out of flush list\n", qp);
1853 flags = bnxt_re_lock_cqs(qp);
1854 bnxt_qplib_clean_qp(&qp->qplib_qp);
1855 bnxt_re_unlock_cqs(qp, flags);
1856 }
1857 }
1858 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1859 qp->qplib_qp.modify_flags |=
1860 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1861 qp->qplib_qp.en_sqd_async_notify = true;
1862 }
1863 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1864 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1865 qp->qplib_qp.access =
1866 __from_ib_access_flags(qp_attr->qp_access_flags);
1867
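/* LOCAL_WRITE must stay set so RC receives work; remote read/write are also enabled unconditionally. */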
1868 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1869
1870 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1871 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1872 }
1873 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1874 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1875 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1876 }
1877 if (qp_attr_mask & IB_QP_QKEY) {
1878 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1879 qp->qplib_qp.qkey = qp_attr->qkey;
1880 }
1881 if (qp_attr_mask & IB_QP_AV) {
1882 const struct ib_global_route *grh =
1883 rdma_ah_read_grh(&qp_attr->ah_attr);
1884 const struct ib_gid_attr *sgid_attr;
1885 struct bnxt_re_gid_ctx *ctx;
1886
1887 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1888 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1889 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1890 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1891 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1892 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1893 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1894 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1895 sizeof(qp->qplib_qp.ah.dgid.data));
1896 qp->qplib_qp.ah.flow_label = grh->flow_label;
1897 sgid_attr = grh->sgid_attr;
1898
1899
1900
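/* Look up the driver's GID context to get the HW SGID table index. */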
1901 ctx = rdma_read_gid_hw_context(sgid_attr);
1902 qp->qplib_qp.ah.sgid_index = ctx->idx;
1903 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1904 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1905 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1906 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1907 ether_addr_copy(qp->qplib_qp.ah.dmac,
1908 qp_attr->ah_attr.roce.dmac);
1909
1910 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1911 &qp->qplib_qp.smac[0]);
1912 if (rc)
1913 return rc;
1914
1915 nw_type = rdma_gid_attr_network_type(sgid_attr);
1916 switch (nw_type) {
1917 case RDMA_NETWORK_IPV4:
1918 qp->qplib_qp.nw_type =
1919 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1920 break;
1921 case RDMA_NETWORK_IPV6:
1922 qp->qplib_qp.nw_type =
1923 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1924 break;
1925 default:
1926 qp->qplib_qp.nw_type =
1927 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1928 break;
1929 }
1930 }
1931
1932 if (qp_attr_mask & IB_QP_PATH_MTU) {
1933 qp->qplib_qp.modify_flags |=
1934 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1935 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1936 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1937 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1938 qp->qplib_qp.modify_flags |=
1939 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1940 qp->qplib_qp.path_mtu =
1941 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1942 qp->qplib_qp.mtu =
1943 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1944 }
1945
1946 if (qp_attr_mask & IB_QP_TIMEOUT) {
1947 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1948 qp->qplib_qp.timeout = qp_attr->timeout;
1949 }
1950 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1951 qp->qplib_qp.modify_flags |=
1952 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1953 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1954 }
1955 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1956 qp->qplib_qp.modify_flags |=
1957 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1958 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1959 }
1960 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1961 qp->qplib_qp.modify_flags |=
1962 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1963 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1964 }
1965 if (qp_attr_mask & IB_QP_RQ_PSN) {
1966 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1967 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1968 }
1969 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1970 qp->qplib_qp.modify_flags |=
1971 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1972
1973 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1974 dev_attr->max_qp_rd_atom);
1975 }
1976 if (qp_attr_mask & IB_QP_SQ_PSN) {
1977 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1978 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1979 }
1980 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1981 if (qp_attr->max_dest_rd_atomic >
1982 dev_attr->max_qp_init_rd_atom) {
1983 ibdev_err(&rdev->ibdev,
1984 "max_dest_rd_atomic requested%d is > dev_max%d",
1985 qp_attr->max_dest_rd_atomic,
1986 dev_attr->max_qp_init_rd_atom);
1987 return -EINVAL;
1988 }
1989
1990 qp->qplib_qp.modify_flags |=
1991 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1992 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1993 }
1994 if (qp_attr_mask & IB_QP_CAP) {
1995 qp->qplib_qp.modify_flags |=
1996 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1997 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1998 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1999 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2000 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2001 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2002 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2003 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2004 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2005 (qp_attr->cap.max_inline_data >=
2006 dev_attr->max_inline_data)) {
2007 ibdev_err(&rdev->ibdev,
2008 "Create QP failed - max exceeded");
2009 return -EINVAL;
2010 }
2011 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2012 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2013 dev_attr->max_qp_wqes + 1);
2014 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2015 qp_attr->cap.max_send_wr;
2016
2017 /* Reserve one SQ slot for the "phantom" WQE used by the queue-full
2018  * detection logic, so an application posting its full quota of send
2019  * WRs does not hit an unexpected queue-full condition */
2021 qp->qplib_qp.sq.q_full_delta -= 1;
2022 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2023 if (qp->qplib_qp.rq.max_wqe) {
2024 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2025 qp->qplib_qp.rq.max_wqe =
2026 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2027 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2028 qp_attr->cap.max_recv_wr;
2029 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2030 } else {
2031 /* RQ is owned by an SRQ; leave the RQ sizing untouched */
2032 }
2033 }
2034 if (qp_attr_mask & IB_QP_DEST_QPN) {
2035 qp->qplib_qp.modify_flags |=
2036 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2037 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2038 }
2039 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2040 if (rc) {
2041 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2042 return rc;
2043 }
2044 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2045 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2046 return rc;
2047 }
2048
2049 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2050 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2051 {
2052 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2053 struct bnxt_re_dev *rdev = qp->rdev;
2054 struct bnxt_qplib_qp *qplib_qp;
2055 int rc;
2056
2057 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2058 if (!qplib_qp)
2059 return -ENOMEM;
2060
2061 qplib_qp->id = qp->qplib_qp.id;
2062 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2063
2064 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2065 if (rc) {
2066 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2067 goto out;
2068 }
2069 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2070 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2071 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2072 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2073 qp_attr->pkey_index = qplib_qp->pkey_index;
2074 qp_attr->qkey = qplib_qp->qkey;
2075 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2076 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2077 qplib_qp->ah.host_sgid_index,
2078 qplib_qp->ah.hop_limit,
2079 qplib_qp->ah.traffic_class);
2080 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2081 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2082 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2083 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2084 qp_attr->timeout = qplib_qp->timeout;
2085 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2086 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2087 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2088 qp_attr->rq_psn = qplib_qp->rq.psn;
2089 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2090 qp_attr->sq_psn = qplib_qp->sq.psn;
2091 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2092 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2093 IB_SIGNAL_REQ_WR;
2094 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2095
2096 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2097 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2098 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2099 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2100 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2101 qp_init_attr->cap = qp_attr->cap;
2102
2103 out:
2104 kfree(qplib_qp);
2105 return rc;
2106 }
2107
2108
2109 /* Build the raw-Ethernet send WQE used for QP1 (GSI) packets, both RoCE v1 and v2 */
2110 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2111 const struct ib_send_wr *wr,
2112 struct bnxt_qplib_swqe *wqe,
2113 int payload_size)
2114 {
2115 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2116 ib_ah);
2117 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2118 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2119 struct bnxt_qplib_sge sge;
2120 u8 nw_type;
2121 u16 ether_type;
2122 union ib_gid dgid;
2123 bool is_eth = false;
2124 bool is_vlan = false;
2125 bool is_grh = false;
2126 bool is_udp = false;
2127 u8 ip_version = 0;
2128 u16 vlan_id = 0xFFFF;
2129 void *buf;
2130 int i, rc = 0;
2131
2132 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2133
2134 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2135 if (rc)
2136 return rc;
2137
2138 /* Pick the on-wire packet type from the source GID's network type */
2139 nw_type = rdma_gid_attr_network_type(sgid_attr);
2140 switch (nw_type) {
2141 case RDMA_NETWORK_IPV4:
2142 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2143 break;
2144 case RDMA_NETWORK_IPV6:
2145 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2146 break;
2147 default:
2148 nw_type = BNXT_RE_ROCE_V1_PACKET;
2149 break;
2150 }
2151 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
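/* RoCE v2 GIDs are UDP-encapsulated: choose IPv4 or IPv6 framing from
 * the source GID.  RoCE v1 frames keep the GRH instead. */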
2152 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2153 if (is_udp) {
2154 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2155 ip_version = 4;
2156 ether_type = ETH_P_IP;
2157 } else {
2158 ip_version = 6;
2159 ether_type = ETH_P_IPV6;
2160 }
2161 is_grh = false;
2162 } else {
2163 ether_type = ETH_P_IBOE;
2164 is_grh = true;
2165 }
2166
2167 is_eth = true;
2168 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
2169
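/* Initialize the UD header template with the L2/L3/L4 layers chosen above */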
2170 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2171 ip_version, is_udp, 0, &qp->qp1_hdr);
2172
2173 /* Ethernet header */
2174 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2175 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2176
2177
2178 /* Ethertype goes either directly into the Ethernet header or into the 802.1Q VLAN tag */
2179 if (!is_vlan) {
2180 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2181 } else {
2182 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2183 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2184 }
2185
2186 if (is_grh || (ip_version == 6)) {
2187 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2188 sizeof(sgid_attr->gid));
2189 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2190 sizeof(sgid_attr->gid));
2191 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2192 }
2193
2194 if (ip_version == 4) {
2195 qp->qp1_hdr.ip4.tos = 0;
2196 qp->qp1_hdr.ip4.id = 0;
2197 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2198 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2199
2200 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2201 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2202 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2203 }
2204
2205 if (is_udp) {
2206 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2207 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2208 qp->qp1_hdr.udp.csum = 0;
2209 }
2210
2211 /* BTH (base transport header) */
2212 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2213 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2214 qp->qp1_hdr.immediate_present = 1;
2215 } else {
2216 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2217 }
2218 if (wr->send_flags & IB_SEND_SOLICITED)
2219 qp->qp1_hdr.bth.solicited_event = 1;
2220
2221 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2222
2223 /* QP1 traffic always uses the default P_Key */
2224 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2225 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2226 qp->qp1_hdr.bth.ack_req = 0;
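/* The PSN is maintained by the driver for QP1 and masked to the 24-bit BTH PSN field */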
2227 qp->send_psn++;
2228 qp->send_psn &= BTH_PSN_MASK;
2229 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2230
2231 /* DETH (datagram extended transport header) */
2232 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2233 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2234
2235 /* Pack the header into the QP1 SQ header buffer and prepend it as SGE 0 */
2236 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2237 if (buf) {
2238 ib_ud_header_pack(&qp->qp1_hdr, buf);
2239 for (i = wqe->num_sge; i; i--) {
2240 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2241 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2242 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2243 }
2244
2245 /* The QP1 header buffer is sized for the largest header layout
2246  * (RoCE v2 over IPv6).  Shrink the header SGE for the smaller
2247  * variants: the IPv4 header is 20 bytes shorter */
2252 if (is_udp && ip_version == 4)
2253 sge.size -= 20;
2254
2255
2256 /* RoCE v1 frames carry no UDP header: drop its 8 bytes */
2259 if (!is_udp)
2260 sge.size -= 8;
2261
2262 /* Untagged frames carry no 4-byte VLAN tag */
2263 if (!is_vlan)
2264 sge.size -= 4;
2265
2266 wqe->sg_list[0].addr = sge.addr;
2267 wqe->sg_list[0].lkey = sge.lkey;
2268 wqe->sg_list[0].size = sge.size;
2269 wqe->num_sge++;
2270
2271 } else {
2272 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2273 rc = -ENOMEM;
2274 }
2275 return rc;
2276 }
2277
2278
2279 /* For GSI receives the consumer supplies SGEs sized only for the
2280  * ib_grh plus the MAD datagram, while the adapter delivers the whole
2281  * raw Ethernet frame.  Receive the headers into a driver-owned
2282  * buffer first and remember the consumer's SGE so the GRH and MAD
2283  * can be handed back to it via the shadow QP. */
2284 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2285 const struct ib_recv_wr *wr,
2286 struct bnxt_qplib_swqe *wqe,
2287 int payload_size)
2288 {
2289 struct bnxt_re_sqp_entries *sqp_entry;
2290 struct bnxt_qplib_sge ref, sge;
2291 struct bnxt_re_dev *rdev;
2292 u32 rq_prod_index;
2293
2294 rdev = qp->rdev;
2295
2296 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2297
2298 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2299 return -ENOMEM;
2300
2301
2302 /* Save the consumer's original SGE and wr_id; the shadow QP later
2303  * receives the GRH and MAD payload into that buffer, and the saved
2304  * wr_id is restored when the completion is reported */
2305 ref.addr = wqe->sg_list[0].addr;
2306 ref.lkey = wqe->sg_list[0].lkey;
2307 ref.size = wqe->sg_list[0].size;
2308
2309 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2310
2311 /* Point the first receive SGE at the driver's QP1 header buffer */
2312 wqe->sg_list[0].addr = sge.addr;
2313 wqe->sg_list[0].lkey = sge.lkey;
2314 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2315 sge.size -= wqe->sg_list[0].size;
2316
2317 sqp_entry->sge.addr = ref.addr;
2318 sqp_entry->sge.lkey = ref.lkey;
2319 sqp_entry->sge.size = ref.size;
2320
2321 sqp_entry->wrid = wqe->wr_id;
2322
2323 wqe->wr_id = rq_prod_index;
2324 return 0;
2325 }
2326
2327 static int is_ud_qp(struct bnxt_re_qp *qp)
2328 {
2329 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2330 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2331 }
2332
2333 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2334 const struct ib_send_wr *wr,
2335 struct bnxt_qplib_swqe *wqe)
2336 {
2337 struct bnxt_re_ah *ah = NULL;
2338
2339 if (is_ud_qp(qp)) {
2340 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2341 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2342 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2343 wqe->send.avid = ah->qplib_ah.id;
2344 }
2345 switch (wr->opcode) {
2346 case IB_WR_SEND:
2347 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2348 break;
2349 case IB_WR_SEND_WITH_IMM:
2350 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2351 wqe->send.imm_data = wr->ex.imm_data;
2352 break;
2353 case IB_WR_SEND_WITH_INV:
2354 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2355 wqe->send.inv_key = wr->ex.invalidate_rkey;
2356 break;
2357 default:
2358 return -EINVAL;
2359 }
2360 if (wr->send_flags & IB_SEND_SIGNALED)
2361 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2362 if (wr->send_flags & IB_SEND_FENCE)
2363 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2364 if (wr->send_flags & IB_SEND_SOLICITED)
2365 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2366 if (wr->send_flags & IB_SEND_INLINE)
2367 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2368
2369 return 0;
2370 }
2371
2372 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2373 struct bnxt_qplib_swqe *wqe)
2374 {
2375 switch (wr->opcode) {
2376 case IB_WR_RDMA_WRITE:
2377 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2378 break;
2379 case IB_WR_RDMA_WRITE_WITH_IMM:
2380 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2381 wqe->rdma.imm_data = wr->ex.imm_data;
2382 break;
2383 case IB_WR_RDMA_READ:
2384 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2385 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2386 break;
2387 default:
2388 return -EINVAL;
2389 }
2390 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2391 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2392 if (wr->send_flags & IB_SEND_SIGNALED)
2393 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2394 if (wr->send_flags & IB_SEND_FENCE)
2395 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2396 if (wr->send_flags & IB_SEND_SOLICITED)
2397 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2398 if (wr->send_flags & IB_SEND_INLINE)
2399 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2400
2401 return 0;
2402 }
2403
2404 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2405 struct bnxt_qplib_swqe *wqe)
2406 {
2407 switch (wr->opcode) {
2408 case IB_WR_ATOMIC_CMP_AND_SWP:
2409 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2410 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2411 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2412 break;
2413 case IB_WR_ATOMIC_FETCH_AND_ADD:
2414 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2415 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2416 break;
2417 default:
2418 return -EINVAL;
2419 }
2420 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2421 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2422 if (wr->send_flags & IB_SEND_SIGNALED)
2423 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2424 if (wr->send_flags & IB_SEND_FENCE)
2425 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2426 if (wr->send_flags & IB_SEND_SOLICITED)
2427 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2428 return 0;
2429 }
2430
2431 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2432 struct bnxt_qplib_swqe *wqe)
2433 {
2434 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2435 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2436
2437 /* A local-invalidate WQE needs an unconditional fence so it only
2438  * executes after all prior WQEs have completed */
2440 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2441
2442 if (wr->send_flags & IB_SEND_SIGNALED)
2443 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2444 if (wr->send_flags & IB_SEND_SOLICITED)
2445 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2446
2447 return 0;
2448 }
2449
2450 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2451 struct bnxt_qplib_swqe *wqe)
2452 {
2453 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2454 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2455 int access = wr->access;
2456
2457 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2458 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2459 wqe->frmr.page_list = mr->pages;
2460 wqe->frmr.page_list_len = mr->npages;
2461 wqe->frmr.levels = qplib_frpl->hwq.level;
2462 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2463
2464
2465 /* As with local invalidate, REG_MR needs an unconditional fence to
2466  * behave as expected */
2468 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2469
2470 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2471 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2472
2473 if (access & IB_ACCESS_LOCAL_WRITE)
2474 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2475 if (access & IB_ACCESS_REMOTE_READ)
2476 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2477 if (access & IB_ACCESS_REMOTE_WRITE)
2478 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2479 if (access & IB_ACCESS_REMOTE_ATOMIC)
2480 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2481 if (access & IB_ACCESS_MW_BIND)
2482 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2483
2484 wqe->frmr.l_key = wr->key;
2485 wqe->frmr.length = wr->mr->length;
2486 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2487 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2488 wqe->frmr.va = wr->mr->iova;
2489 return 0;
2490 }
2491
2492 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2493 const struct ib_send_wr *wr,
2494 struct bnxt_qplib_swqe *wqe)
2495 {
2496 /* Copy each SGE's payload into the WQE's inline data buffer */
2497 u8 *in_data;
2498 u32 i, sge_len;
2499 void *sge_addr;
2500
2501 in_data = wqe->inline_data;
2502 for (i = 0; i < wr->num_sge; i++) {
2503 sge_addr = (void *)(unsigned long)
2504 wr->sg_list[i].addr;
2505 sge_len = wr->sg_list[i].length;
2506
2507 if ((sge_len + wqe->inline_len) >
2508 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2509 ibdev_err(&rdev->ibdev,
2510 "Inline data size requested > supported value");
2511 return -EINVAL;
2512 }
2513 sge_len = wr->sg_list[i].length;
2514
2515 memcpy(in_data, sge_addr, sge_len);
2516 in_data += wr->sg_list[i].length;
2517 wqe->inline_len += wr->sg_list[i].length;
2518 }
2519 return wqe->inline_len;
2520 }
2521
2522 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2523 const struct ib_send_wr *wr,
2524 struct bnxt_qplib_swqe *wqe)
2525 {
2526 int payload_sz = 0;
2527
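/* Inline sends copy the payload into the WQE itself; otherwise build a gather list from the WR's SGEs */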
2528 if (wr->send_flags & IB_SEND_INLINE)
2529 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2530 else
2531 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2532 wqe->num_sge);
2533
2534 return payload_sz;
2535 }
2536
2537 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2538 {
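/* Workaround for a HW stall on UD/GSI/raw-Ethernet QPs: after
 * BNXT_RE_UD_QP_HW_STALL posted WQEs, nudge the QP back to RTS to keep
 * the send queue moving, then restart the counter. */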
2539 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2540 qp->ib_qp.qp_type == IB_QPT_GSI ||
2541 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2542 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2543 int qp_attr_mask;
2544 struct ib_qp_attr qp_attr;
2545
2546 qp_attr_mask = IB_QP_STATE;
2547 qp_attr.qp_state = IB_QPS_RTS;
2548 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2549 qp->qplib_qp.wqe_cnt = 0;
2550 }
2551 }
2552
2553 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2554 struct bnxt_re_qp *qp,
2555 const struct ib_send_wr *wr)
2556 {
2557 int rc = 0, payload_sz = 0;
2558 unsigned long flags;
2559
2560 spin_lock_irqsave(&qp->sq_lock, flags);
2561 while (wr) {
2562 struct bnxt_qplib_swqe wqe = {};
2563
2564 /* Common WQE fields */
2565 wqe.num_sge = wr->num_sge;
2566 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2567 ibdev_err(&rdev->ibdev,
2568 "Limit exceeded for Send SGEs");
2569 rc = -EINVAL;
2570 goto bad;
2571 }
2572
2573 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2574 if (payload_sz < 0) {
2575 rc = -EINVAL;
2576 goto bad;
2577 }
2578 wqe.wr_id = wr->wr_id;
2579
2580 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2581
2582 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2583 if (!rc)
2584 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2585 bad:
2586 if (rc) {
2587 ibdev_err(&rdev->ibdev,
2588 "Post send failed opcode = %#x rc = %d",
2589 wr->opcode, rc);
2590 break;
2591 }
2592 wr = wr->next;
2593 }
2594 bnxt_qplib_post_send_db(&qp->qplib_qp);
2595 bnxt_ud_qp_hw_stall_workaround(qp);
2596 spin_unlock_irqrestore(&qp->sq_lock, flags);
2597 return rc;
2598 }
2599
2600 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2601 const struct ib_send_wr **bad_wr)
2602 {
2603 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2604 struct bnxt_qplib_swqe wqe;
2605 int rc = 0, payload_sz = 0;
2606 unsigned long flags;
2607
2608 spin_lock_irqsave(&qp->sq_lock, flags);
2609 while (wr) {
2610
2611 memset(&wqe, 0, sizeof(wqe));
2612
2613
2614 wqe.num_sge = wr->num_sge;
2615 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2616 ibdev_err(&qp->rdev->ibdev,
2617 "Limit exceeded for Send SGEs");
2618 rc = -EINVAL;
2619 goto bad;
2620 }
2621
2622 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2623 if (payload_sz < 0) {
2624 rc = -EINVAL;
2625 goto bad;
2626 }
2627 wqe.wr_id = wr->wr_id;
2628
2629 switch (wr->opcode) {
2630 case IB_WR_SEND:
2631 case IB_WR_SEND_WITH_IMM:
2632 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2633 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2634 payload_sz);
2635 if (rc)
2636 goto bad;
2637 wqe.rawqp1.lflags |=
2638 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2639 }
2640 switch (wr->send_flags) {
2641 case IB_SEND_IP_CSUM:
2642 wqe.rawqp1.lflags |=
2643 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2644 break;
2645 default:
2646 break;
2647 }
2648 fallthrough;
2649 case IB_WR_SEND_WITH_INV:
2650 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2651 break;
2652 case IB_WR_RDMA_WRITE:
2653 case IB_WR_RDMA_WRITE_WITH_IMM:
2654 case IB_WR_RDMA_READ:
2655 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2656 break;
2657 case IB_WR_ATOMIC_CMP_AND_SWP:
2658 case IB_WR_ATOMIC_FETCH_AND_ADD:
2659 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2660 break;
2661 case IB_WR_RDMA_READ_WITH_INV:
2662 ibdev_err(&qp->rdev->ibdev,
2663 "RDMA Read with Invalidate is not supported");
2664 rc = -EINVAL;
2665 goto bad;
2666 case IB_WR_LOCAL_INV:
2667 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2668 break;
2669 case IB_WR_REG_MR:
2670 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2671 break;
2672 default:
2673 /* Unsupported work-request opcodes */
2674 ibdev_err(&qp->rdev->ibdev,
2675 "WR (%#x) is not supported", wr->opcode);
2676 rc = -EINVAL;
2677 goto bad;
2678 }
2679 if (!rc)
2680 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2681 bad:
2682 if (rc) {
2683 ibdev_err(&qp->rdev->ibdev,
2684 "post_send failed op:%#x qps = %#x rc = %d\n",
2685 wr->opcode, qp->qplib_qp.state, rc);
2686 *bad_wr = wr;
2687 break;
2688 }
2689 wr = wr->next;
2690 }
2691 bnxt_qplib_post_send_db(&qp->qplib_qp);
2692 bnxt_ud_qp_hw_stall_workaround(qp);
2693 spin_unlock_irqrestore(&qp->sq_lock, flags);
2694
2695 return rc;
2696 }
2697
2698 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2699 struct bnxt_re_qp *qp,
2700 const struct ib_recv_wr *wr)
2701 {
2702 struct bnxt_qplib_swqe wqe;
2703 int rc = 0;
2704
2705 memset(&wqe, 0, sizeof(wqe));
2706 while (wr) {
2707
2708 memset(&wqe, 0, sizeof(wqe));
2709
2710
2711 wqe.num_sge = wr->num_sge;
2712 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2713 ibdev_err(&rdev->ibdev,
2714 "Limit exceeded for Receive SGEs");
2715 rc = -EINVAL;
2716 break;
2717 }
2718 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2719 wqe.wr_id = wr->wr_id;
2720 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2721
2722 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2723 if (rc)
2724 break;
2725
2726 wr = wr->next;
2727 }
2728 if (!rc)
2729 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2730 return rc;
2731 }
2732
2733 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2734 const struct ib_recv_wr **bad_wr)
2735 {
2736 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2737 struct bnxt_qplib_swqe wqe;
2738 int rc = 0, payload_sz = 0;
2739 unsigned long flags;
2740 u32 count = 0;
2741
2742 spin_lock_irqsave(&qp->rq_lock, flags);
2743 while (wr) {
2744
2745 memset(&wqe, 0, sizeof(wqe));
2746
2747
2748 wqe.num_sge = wr->num_sge;
2749 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2750 ibdev_err(&qp->rdev->ibdev,
2751 "Limit exceeded for Receive SGEs");
2752 rc = -EINVAL;
2753 *bad_wr = wr;
2754 break;
2755 }
2756
2757 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2758 wr->num_sge);
2759 wqe.wr_id = wr->wr_id;
2760 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2761
2762 if (ib_qp->qp_type == IB_QPT_GSI &&
2763 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2764 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2765 payload_sz);
2766 if (!rc)
2767 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2768 if (rc) {
2769 *bad_wr = wr;
2770 break;
2771 }
2772
2773 /* Ring the RQ doorbell after every BNXT_RE_RQ_WQE_THRESHOLD postings */
2774 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2775 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2776 count = 0;
2777 }
2778
2779 wr = wr->next;
2780 }
2781
2782 if (count)
2783 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2784
2785 spin_unlock_irqrestore(&qp->rq_lock, flags);
2786
2787 return rc;
2788 }
2789
2790 /* Completion Queues */
2791 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2792 {
2793 struct bnxt_re_cq *cq;
2794 struct bnxt_qplib_nq *nq;
2795 struct bnxt_re_dev *rdev;
2796
2797 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2798 rdev = cq->rdev;
2799 nq = cq->qplib_cq.nq;
2800
2801 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2802 ib_umem_release(cq->umem);
2803
2804 atomic_dec(&rdev->cq_count);
2805 nq->budget--;
2806 kfree(cq->cql);
2807 return 0;
2808 }
2809
2810 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2811 struct ib_udata *udata)
2812 {
2813 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2814 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2815 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2816 int rc, entries;
2817 int cqe = attr->cqe;
2818 struct bnxt_qplib_nq *nq = NULL;
2819 unsigned int nq_alloc_cnt;
2820
2821 if (attr->flags)
2822 return -EOPNOTSUPP;
2823
2824 /* Validate the requested CQ depth against the device limit */
2825 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2826 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2827 return -EINVAL;
2828 }
2829
2830 cq->rdev = rdev;
2831 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2832
2833 entries = roundup_pow_of_two(cqe + 1);
2834 if (entries > dev_attr->max_cq_wqes + 1)
2835 entries = dev_attr->max_cq_wqes + 1;
2836
2837 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2838 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2839 if (udata) {
2840 struct bnxt_re_cq_req req;
2841 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2842 udata, struct bnxt_re_ucontext, ib_uctx);
2843 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2844 rc = -EFAULT;
2845 goto fail;
2846 }
2847
2848 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2849 entries * sizeof(struct cq_base),
2850 IB_ACCESS_LOCAL_WRITE);
2851 if (IS_ERR(cq->umem)) {
2852 rc = PTR_ERR(cq->umem);
2853 goto fail;
2854 }
2855 cq->qplib_cq.sg_info.umem = cq->umem;
2856 cq->qplib_cq.dpi = &uctx->dpi;
2857 } else {
2858 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2859 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2860 GFP_KERNEL);
2861 if (!cq->cql) {
2862 rc = -ENOMEM;
2863 goto fail;
2864 }
2865
2866 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2867 }
2868
2869 /* Assign the CQ to one of the driver's completion notification
2870  * queues (NQs) in round-robin fashion, using the running
2871  * nq_alloc_cnt counter */
2872 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2873 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2874 cq->qplib_cq.max_wqe = entries;
2875 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2876 cq->qplib_cq.nq = nq;
2877
2878 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2879 if (rc) {
2880 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2881 goto fail;
2882 }
2883
2884 cq->ib_cq.cqe = entries;
2885 cq->cq_period = cq->qplib_cq.period;
2886 nq->budget++;
2887
2888 atomic_inc(&rdev->cq_count);
2889 spin_lock_init(&cq->cq_lock);
2890
2891 if (udata) {
2892 struct bnxt_re_cq_resp resp;
2893
2894 resp.cqid = cq->qplib_cq.id;
2895 resp.tail = cq->qplib_cq.hwq.cons;
2896 resp.phase = cq->qplib_cq.period;
2897 resp.rsvd = 0;
2898 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2899 if (rc) {
2900 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2901 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2902 goto c2fail;
2903 }
2904 }
2905
2906 return 0;
2907
2908 c2fail:
2909 ib_umem_release(cq->umem);
2910 fail:
2911 kfree(cq->cql);
2912 return rc;
2913 }
2914
2915 static u8 __req_to_ib_wc_status(u8 qstatus)
2916 {
2917 switch (qstatus) {
2918 case CQ_REQ_STATUS_OK:
2919 return IB_WC_SUCCESS;
2920 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2921 return IB_WC_BAD_RESP_ERR;
2922 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2923 return IB_WC_LOC_LEN_ERR;
2924 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2925 return IB_WC_LOC_QP_OP_ERR;
2926 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2927 return IB_WC_LOC_PROT_ERR;
2928 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2929 return IB_WC_GENERAL_ERR;
2930 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2931 return IB_WC_REM_INV_REQ_ERR;
2932 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2933 return IB_WC_REM_ACCESS_ERR;
2934 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2935 return IB_WC_REM_OP_ERR;
2936 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2937 return IB_WC_RNR_RETRY_EXC_ERR;
2938 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2939 return IB_WC_RETRY_EXC_ERR;
2940 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2941 return IB_WC_WR_FLUSH_ERR;
2942 default:
2943 return IB_WC_GENERAL_ERR;
2944 }
2945 return 0;
2946 }
2947
2948 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2949 {
2950 switch (qstatus) {
2951 case CQ_RES_RAWETH_QP1_STATUS_OK:
2952 return IB_WC_SUCCESS;
2953 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2954 return IB_WC_LOC_ACCESS_ERR;
2955 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2956 return IB_WC_LOC_LEN_ERR;
2957 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2958 return IB_WC_LOC_PROT_ERR;
2959 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2960 return IB_WC_LOC_QP_OP_ERR;
2961 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2962 return IB_WC_GENERAL_ERR;
2963 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2964 return IB_WC_WR_FLUSH_ERR;
2965 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2966 return IB_WC_WR_FLUSH_ERR;
2967 default:
2968 return IB_WC_GENERAL_ERR;
2969 }
2970 }
2971
2972 static u8 __rc_to_ib_wc_status(u8 qstatus)
2973 {
2974 switch (qstatus) {
2975 case CQ_RES_RC_STATUS_OK:
2976 return IB_WC_SUCCESS;
2977 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2978 return IB_WC_LOC_ACCESS_ERR;
2979 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2980 return IB_WC_LOC_LEN_ERR;
2981 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2982 return IB_WC_LOC_PROT_ERR;
2983 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2984 return IB_WC_LOC_QP_OP_ERR;
2985 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2986 return IB_WC_GENERAL_ERR;
2987 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2988 return IB_WC_REM_INV_REQ_ERR;
2989 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2990 return IB_WC_WR_FLUSH_ERR;
2991 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2992 return IB_WC_WR_FLUSH_ERR;
2993 default:
2994 return IB_WC_GENERAL_ERR;
2995 }
2996 }
2997
2998 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2999 {
3000 switch (cqe->type) {
3001 case BNXT_QPLIB_SWQE_TYPE_SEND:
3002 wc->opcode = IB_WC_SEND;
3003 break;
3004 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3005 wc->opcode = IB_WC_SEND;
3006 wc->wc_flags |= IB_WC_WITH_IMM;
3007 break;
3008 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3009 wc->opcode = IB_WC_SEND;
3010 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3011 break;
3012 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3013 wc->opcode = IB_WC_RDMA_WRITE;
3014 break;
3015 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3016 wc->opcode = IB_WC_RDMA_WRITE;
3017 wc->wc_flags |= IB_WC_WITH_IMM;
3018 break;
3019 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3020 wc->opcode = IB_WC_RDMA_READ;
3021 break;
3022 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3023 wc->opcode = IB_WC_COMP_SWAP;
3024 break;
3025 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3026 wc->opcode = IB_WC_FETCH_ADD;
3027 break;
3028 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3029 wc->opcode = IB_WC_LOCAL_INV;
3030 break;
3031 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3032 wc->opcode = IB_WC_REG_MR;
3033 break;
3034 default:
3035 wc->opcode = IB_WC_SEND;
3036 break;
3037 }
3038
3039 wc->status = __req_to_ib_wc_status(cqe->status);
3040 }
3041
3042 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3043 u16 raweth_qp1_flags2)
3044 {
3045 bool is_ipv6 = false, is_ipv4 = false;
3046
3047 /* Only frames flagged with the RoCE itype are handled here */
3048 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3049 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3050 return -1;
3051
3052 if (raweth_qp1_flags2 &
3053 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3054 raweth_qp1_flags2 &
3055 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3056 /* Both IP and L4 checksums calculated: RoCE v2; IP_TYPE picks IPv4 vs IPv6 */
3057 (raweth_qp1_flags2 &
3058 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3059 (is_ipv6 = true) : (is_ipv4 = true);
3060 return ((is_ipv6) ?
3061 BNXT_RE_ROCEV2_IPV6_PACKET :
3062 BNXT_RE_ROCEV2_IPV4_PACKET);
3063 } else {
3064 return BNXT_RE_ROCE_V1_PACKET;
3065 }
3066 }
3067
3068 static int bnxt_re_to_ib_nw_type(int nw_type)
3069 {
3070 u8 nw_hdr_type = 0xFF;
3071
3072 switch (nw_type) {
3073 case BNXT_RE_ROCE_V1_PACKET:
3074 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3075 break;
3076 case BNXT_RE_ROCEV2_IPV4_PACKET:
3077 nw_hdr_type = RDMA_NETWORK_IPV4;
3078 break;
3079 case BNXT_RE_ROCEV2_IPV6_PACKET:
3080 nw_hdr_type = RDMA_NETWORK_IPV6;
3081 break;
3082 }
3083 return nw_hdr_type;
3084 }
3085
3086 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3087 void *rq_hdr_buf)
3088 {
3089 u8 *tmp_buf = NULL;
3090 struct ethhdr *eth_hdr;
3091 u16 eth_type;
3092 bool rc = false;
3093
3094 tmp_buf = (u8 *)rq_hdr_buf;
3095
3096 /* When the destination MAC differs from the interface MAC, the
3097  * frame may be a looped-back (or multicast) frame; inspect the
3098  * headers to see whether it is actually a RoCE packet
3099  */
3100 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3101 tmp_buf += 4;
3102
3103 eth_hdr = (struct ethhdr *)tmp_buf;
3104 eth_type = ntohs(eth_hdr->h_proto);
3105 switch (eth_type) {
3106 case ETH_P_IBOE:
3107 rc = true;
3108 break;
3109 case ETH_P_IP:
3110 case ETH_P_IPV6: {
3111 u32 len;
3112 struct udphdr *udp_hdr;
3113
3114 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3115 sizeof(struct ipv6hdr));
3116 tmp_buf += sizeof(struct ethhdr) + len;
3117 udp_hdr = (struct udphdr *)tmp_buf;
3118 if (ntohs(udp_hdr->dest) ==
3119 ROCE_V2_UDP_DPORT)
3120 rc = true;
3121 break;
3122 }
3123 default:
3124 break;
3125 }
3126 }
3127
3128 return rc;
3129 }
3130
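/* A raw QP1 (GSI) receive is handled by re-injecting the frame into the
 * shadow UD QP: a receive pointing at the consumer's saved buffer is
 * posted on the shadow QP, then the captured frame is sent to it, so the
 * eventual UD completion carries the GRH + MAD for the consumer. */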
3131 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3132 struct bnxt_qplib_cqe *cqe)
3133 {
3134 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3135 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3136 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3137 struct bnxt_re_ah *gsi_sah;
3138 struct ib_send_wr *swr;
3139 struct ib_ud_wr udwr;
3140 struct ib_recv_wr rwr;
3141 int pkt_type = 0;
3142 u32 tbl_idx;
3143 void *rq_hdr_buf;
3144 dma_addr_t rq_hdr_buf_map;
3145 dma_addr_t shrq_hdr_buf_map;
3146 u32 offset = 0;
3147 u32 skip_bytes = 0;
3148 struct ib_sge s_sge[2];
3149 struct ib_sge r_sge[2];
3150 int rc;
3151
3152 memset(&udwr, 0, sizeof(udwr));
3153 memset(&rwr, 0, sizeof(rwr));
3154 memset(&s_sge, 0, sizeof(s_sge));
3155 memset(&r_sge, 0, sizeof(r_sge));
3156
3157 swr = &udwr.wr;
3158 tbl_idx = cqe->wr_id;
3159
3160 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3161 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3162 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3163 tbl_idx);
3164
3165 /* Header buffer that receives the GRH for the shadow QP receive below */
3166 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3167 tbl_idx);
3168 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3169
3170 /* Stash the original CQE; it is replayed when the shadow QP completion arrives */
3171 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3172 sqp_entry->qp1_qp = gsi_qp;
3173
3174 /* Classify the frame as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6 */
3175
3176 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3177 cqe->raweth_qp1_flags2);
3178 if (pkt_type < 0) {
3179 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3180 return -EINVAL;
3181 }
3182
3183 /* A RoCE v2 IPv4 header block is 20 bytes shorter than the IPv6
3184  * one; adjust the offset into the consumer buffer accordingly */
3185 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3186 offset = 20;
3187
3188
3189 /* Looped-back QP1 frames carry 4 extra bytes ahead of the Ethernet
3190  * header; skip them when building the send SGEs
3191  */
3192 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3193 skip_bytes = 4;
3194
3195 /* First send SGE: the received GRH/IP headers, past the Ethernet header */
3196 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3197 + skip_bytes;
3198 s_sge[0].lkey = 0xFFFFFFFF;
3199 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3200 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3201
3202 /* Second send SGE: the 256-byte MAD payload following BTH/DETH */
3203 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3204 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3205 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3206 s_sge[1].addr += 8;
3207 s_sge[1].lkey = 0xFFFFFFFF;
3208 s_sge[1].length = 256;
3209
3210 /* Receive SGEs for the shadow QP: 40 bytes of GRH into the shadow
3211  * header buffer, then GRH + MAD into the consumer's saved buffer */
3212 r_sge[0].addr = shrq_hdr_buf_map;
3213 r_sge[0].lkey = 0xFFFFFFFF;
3214 r_sge[0].length = 40;
3215
3216 r_sge[1].addr = sqp_entry->sge.addr + offset;
3217 r_sge[1].lkey = sqp_entry->sge.lkey;
3218 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3219
3220 /* Post the receive on the shadow QP before resending the frame */
3221 rwr.num_sge = 2;
3222 rwr.sg_list = r_sge;
3223 rwr.wr_id = tbl_idx;
3224 rwr.next = NULL;
3225
3226 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3227 if (rc) {
3228 ibdev_err(&rdev->ibdev,
3229 "Failed to post Rx buffers to shadow QP");
3230 return -ENOMEM;
3231 }
3232
3233 swr->num_sge = 2;
3234 swr->sg_list = s_sge;
3235 swr->wr_id = tbl_idx;
3236 swr->opcode = IB_WR_SEND;
3237 swr->next = NULL;
3238 gsi_sah = rdev->gsi_ctx.gsi_sah;
3239 udwr.ah = &gsi_sah->ib_ah;
3240 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3241 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3242
3243 /* Resend the captured frame to the shadow QP via the GSI shadow AH */
3244 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3245
3246 return 0;
3247 }
3248
3249 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3250 struct bnxt_qplib_cqe *cqe)
3251 {
3252 wc->opcode = IB_WC_RECV;
3253 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3254 wc->wc_flags |= IB_WC_GRH;
3255 }
3256
3257 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3258 u16 vlan_id)
3259 {
3260 /* Report the VLAN id only when it is configured on the host; an
3261  * unknown id may be a transparent VLAN, so do not report it
3262  */
3264 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3265 htons(ETH_P_8021Q), vlan_id))
3266 return false;
3267 return true;
3268 }
3269
3270 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3271 u16 *vid, u8 *sl)
3272 {
3273 bool ret = false;
3274 u32 metadata;
3275 u16 tpid;
3276
3277 metadata = orig_cqe->raweth_qp1_metadata;
3278 if (orig_cqe->raweth_qp1_flags2 &
3279 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3280 tpid = ((metadata &
3281 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3282 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3283 if (tpid == ETH_P_8021Q) {
3284 *vid = metadata &
3285 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3286 *sl = (metadata &
3287 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3288 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3289 ret = true;
3290 }
3291 }
3292
3293 return ret;
3294 }
3295
3296 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3297 struct bnxt_qplib_cqe *cqe)
3298 {
3299 wc->opcode = IB_WC_RECV;
3300 wc->status = __rc_to_ib_wc_status(cqe->status);
3301
3302 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3303 wc->wc_flags |= IB_WC_WITH_IMM;
3304 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3305 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3306 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3307 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3308 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3309 }
3310
3311 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3312 struct ib_wc *wc,
3313 struct bnxt_qplib_cqe *cqe)
3314 {
3315 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3316 struct bnxt_re_qp *gsi_qp = NULL;
3317 struct bnxt_qplib_cqe *orig_cqe = NULL;
3318 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3319 int nw_type;
3320 u32 tbl_idx;
3321 u16 vlan_id;
3322 u8 sl;
3323
3324 tbl_idx = cqe->wr_id;
3325
3326 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3327 gsi_qp = sqp_entry->qp1_qp;
3328 orig_cqe = &sqp_entry->cqe;
3329
3330 wc->wr_id = sqp_entry->wrid;
3331 wc->byte_len = orig_cqe->length;
3332 wc->qp = &gsi_qp->ib_qp;
3333
3334 wc->ex.imm_data = orig_cqe->immdata;
3335 wc->src_qp = orig_cqe->src_qp;
3336 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3337 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3338 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3339 wc->vlan_id = vlan_id;
3340 wc->sl = sl;
3341 wc->wc_flags |= IB_WC_WITH_VLAN;
3342 }
3343 }
3344 wc->port_num = 1;
3345 wc->vendor_err = orig_cqe->status;
3346
3347 wc->opcode = IB_WC_RECV;
3348 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3349 wc->wc_flags |= IB_WC_GRH;
3350
3351 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3352 orig_cqe->raweth_qp1_flags2);
3353 if (nw_type >= 0) {
3354 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3355 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3356 }
3357 }
3358
3359 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3360 struct ib_wc *wc,
3361 struct bnxt_qplib_cqe *cqe)
3362 {
3363 struct bnxt_re_dev *rdev;
3364 u16 vlan_id = 0;
3365 u8 nw_type;
3366
3367 rdev = qp->rdev;
3368 wc->opcode = IB_WC_RECV;
3369 wc->status = __rc_to_ib_wc_status(cqe->status);
3370
3371 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3372 wc->wc_flags |= IB_WC_WITH_IMM;
3373
3374 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3375 wc->wc_flags |= IB_WC_GRH;
3376 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3377 wc->wc_flags |= IB_WC_WITH_SMAC;
3378 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3379 vlan_id = (cqe->cfa_meta & 0xFFF);
3380 }
3381
3382 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3383 wc->vlan_id = vlan_id;
3384 wc->wc_flags |= IB_WC_WITH_VLAN;
3385 }
3386 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3387 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3388 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3389 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3390 }
3391
3392 }
3393
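/* Post a "phantom" WQE (a fence memory-window bind) on the send queue;
 * the poll loop below requests one whenever the qplib layer sets
 * sq->send_phantom. */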
3394 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3395 {
3396 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3397 unsigned long flags;
3398 int rc = 0;
3399
3400 spin_lock_irqsave(&qp->sq_lock, flags);
3401
3402 rc = bnxt_re_bind_fence_mw(lib_qp);
3403 if (!rc) {
3404 lib_qp->sq.phantom_wqe_cnt++;
3405 ibdev_dbg(&qp->rdev->ibdev,
3406 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3407 lib_qp->id, lib_qp->sq.hwq.prod,
3408 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3409 lib_qp->sq.phantom_wqe_cnt);
3410 }
3411
3412 spin_unlock_irqrestore(&qp->sq_lock, flags);
3413 return rc;
3414 }
3415
3416 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3417 {
3418 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3419 struct bnxt_re_qp *qp, *sh_qp;
3420 struct bnxt_qplib_cqe *cqe;
3421 int i, ncqe, budget;
3422 struct bnxt_qplib_q *sq;
3423 struct bnxt_qplib_qp *lib_qp;
3424 u32 tbl_idx;
3425 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3426 unsigned long flags;
3427
3428 spin_lock_irqsave(&cq->cq_lock, flags);
3429 budget = min_t(u32, num_entries, cq->max_cql);
3430 num_entries = budget;
3431 if (!cq->cql) {
3432 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3433 goto exit;
3434 }
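/* Drain up to 'budget' CQEs from the qplib CQ (plus any flush-list
 * entries) and transcribe each one into an ib_wc for the caller. */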
3435 cqe = &cq->cql[0];
3436 while (budget) {
3437 lib_qp = NULL;
3438 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3439 if (lib_qp) {
3440 sq = &lib_qp->sq;
3441 if (sq->send_phantom) {
3442 qp = container_of(lib_qp,
3443 struct bnxt_re_qp, qplib_qp);
3444 if (send_phantom_wqe(qp) == -ENOMEM)
3445 ibdev_err(&cq->rdev->ibdev,
3446 "Phantom failed! Scheduled to send again\n");
3447 else
3448 sq->send_phantom = false;
3449 }
3450 }
3451 if (ncqe < budget)
3452 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3453 cqe + ncqe,
3454 budget - ncqe);
3455
3456 if (!ncqe)
3457 break;
3458
3459 for (i = 0; i < ncqe; i++, cqe++) {
3460 /* Transcribe this qplib CQE into the caller's ib_wc */
3461 memset(wc, 0, sizeof(*wc));
3462
3463 wc->wr_id = cqe->wr_id;
3464 wc->byte_len = cqe->length;
3465 qp = container_of
3466 ((struct bnxt_qplib_qp *)
3467 (unsigned long)(cqe->qp_handle),
3468 struct bnxt_re_qp, qplib_qp);
3469 wc->qp = &qp->ib_qp;
3470 wc->ex.imm_data = cqe->immdata;
3471 wc->src_qp = cqe->src_qp;
3472 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3473 wc->port_num = 1;
3474 wc->vendor_err = cqe->status;
3475
3476 switch (cqe->opcode) {
3477 case CQ_BASE_CQE_TYPE_REQ:
3478 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3479 if (sh_qp &&
3480 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3481
3482 /* Send completion on the shadow GSI QP: nothing to report to
3483  * the consumer, skip it */
3484 memset(wc, 0, sizeof(*wc));
3485 continue;
3486 }
3487 bnxt_re_process_req_wc(wc, cqe);
3488 break;
3489 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3490 if (!cqe->status) {
3491 int rc = 0;
3492
3493 rc = bnxt_re_process_raw_qp_pkt_rx
3494 (qp, cqe);
3495 if (!rc) {
3496 memset(wc, 0, sizeof(*wc));
3497 continue;
3498 }
3499 cqe->status = -1;
3500 }
3501
3502 /* Error completions are not looped through the shadow QP; report
3503  * them directly, restoring the consumer's original wr_id from the
3504  * sqp table */
3505 tbl_idx = cqe->wr_id;
3506 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3507 wc->wr_id = sqp_entry->wrid;
3508 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3509 break;
3510 case CQ_BASE_CQE_TYPE_RES_RC:
3511 bnxt_re_process_res_rc_wc(wc, cqe);
3512 break;
3513 case CQ_BASE_CQE_TYPE_RES_UD:
3514 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3515 if (sh_qp &&
3516 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3517
3518 /* UD receive on the shadow QP: report the stored original QP1
3519  * completion to the consumer instead */
3520 if (cqe->status) {
3521 continue;
3522 } else {
3523 bnxt_re_process_res_shadow_qp_wc
3524 (qp, wc, cqe);
3525 break;
3526 }
3527 }
3528 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3529 break;
3530 default:
3531 ibdev_err(&cq->rdev->ibdev,
3532 "POLL CQ : type 0x%x not handled",
3533 cqe->opcode);
3534 continue;
3535 }
3536 wc++;
3537 budget--;
3538 }
3539 }
3540 exit:
3541 spin_unlock_irqrestore(&cq->cq_lock, flags);
3542 return num_entries - budget;
3543 }
3544
3545 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3546 enum ib_cq_notify_flags ib_cqn_flags)
3547 {
3548 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3549 int type = 0, rc = 0;
3550 unsigned long flags;
3551
3552 spin_lock_irqsave(&cq->cq_lock, flags);
3553 /* Arm the CQ for all completions or only for solicited ones */
3554 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3555 type = DBC_DBC_TYPE_CQ_ARMALL;
3556
3557 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3558 type = DBC_DBC_TYPE_CQ_ARMSE;
3559
3560 /* Let the caller know if completions are already pending */
3561 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3562 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3563 rc = 1;
3564 goto exit;
3565 }
3566 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3567
3568 exit:
3569 spin_unlock_irqrestore(&cq->cq_lock, flags);
3570 return rc;
3571 }
3572
3573 /* Memory Regions */
3574 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3575 {
3576 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3577 struct bnxt_re_dev *rdev = pd->rdev;
3578 struct bnxt_re_mr *mr;
3579 int rc;
3580
3581 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3582 if (!mr)
3583 return ERR_PTR(-ENOMEM);
3584
3585 mr->rdev = rdev;
3586 mr->qplib_mr.pd = &pd->qplib_pd;
3587 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3588 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3589
3590 /* Allocate the HW MR and register it with no page list, covering the full DMA range */
3591 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3592 if (rc)
3593 goto fail;
3594
3595 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3596 mr->qplib_mr.total_size = -1;
3597 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3598 PAGE_SIZE);
3599 if (rc)
3600 goto fail_mr;
3601
3602 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3603 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3604 IB_ACCESS_REMOTE_ATOMIC))
3605 mr->ib_mr.rkey = mr->ib_mr.lkey;
3606 atomic_inc(&rdev->mr_count);
3607
3608 return &mr->ib_mr;
3609
3610 fail_mr:
3611 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3612 fail:
3613 kfree(mr);
3614 return ERR_PTR(rc);
3615 }
3616
3617 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3618 {
3619 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3620 struct bnxt_re_dev *rdev = mr->rdev;
3621 int rc;
3622
3623 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3624 if (rc) {
3625 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3626 return rc;
3627 }
3628
3629 if (mr->pages) {
3630 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3631 &mr->qplib_frpl);
3632 kfree(mr->pages);
3633 mr->npages = 0;
3634 mr->pages = NULL;
3635 }
3636 ib_umem_release(mr->ib_umem);
3637
3638 kfree(mr);
3639 atomic_dec(&rdev->mr_count);
3640 return rc;
3641 }
3642
3643 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3644 {
3645 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3646
3647 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3648 return -ENOMEM;
3649
3650 mr->pages[mr->npages++] = addr;
3651 return 0;
3652 }
3653
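/* ib_sg_to_pages() walks the scatterlist and calls bnxt_re_set_page()
 * once per page-sized DMA block, filling mr->pages for the REG_MR WQE. */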
3654 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3655 unsigned int *sg_offset)
3656 {
3657 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3658
3659 mr->npages = 0;
3660 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3661 }
3662
3663 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3664 u32 max_num_sg)
3665 {
3666 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3667 struct bnxt_re_dev *rdev = pd->rdev;
3668 struct bnxt_re_mr *mr = NULL;
3669 int rc;
3670
3671 if (type != IB_MR_TYPE_MEM_REG) {
3672 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3673 return ERR_PTR(-EINVAL);
3674 }
3675 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3676 return ERR_PTR(-EINVAL);
3677
3678 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3679 if (!mr)
3680 return ERR_PTR(-ENOMEM);
3681
3682 mr->rdev = rdev;
3683 mr->qplib_mr.pd = &pd->qplib_pd;
3684 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3685 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3686
3687 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3688 if (rc)
3689 goto bail;
3690
3691 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3692 mr->ib_mr.rkey = mr->ib_mr.lkey;
3693
3694 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3695 if (!mr->pages) {
3696 rc = -ENOMEM;
3697 goto fail;
3698 }
3699 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3700 &mr->qplib_frpl, max_num_sg);
3701 if (rc) {
3702 ibdev_err(&rdev->ibdev,
3703 "Failed to allocate HW FR page list");
3704 goto fail_mr;
3705 }
3706
3707 atomic_inc(&rdev->mr_count);
3708 return &mr->ib_mr;
3709
3710 fail_mr:
3711 kfree(mr->pages);
3712 fail:
3713 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3714 bail:
3715 kfree(mr);
3716 return ERR_PTR(rc);
3717 }
3718
3719 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3720 struct ib_udata *udata)
3721 {
3722 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3723 struct bnxt_re_dev *rdev = pd->rdev;
3724 struct bnxt_re_mw *mw;
3725 int rc;
3726
3727 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3728 if (!mw)
3729 return ERR_PTR(-ENOMEM);
3730 mw->rdev = rdev;
3731 mw->qplib_mw.pd = &pd->qplib_pd;
3732
3733 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3734 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3735 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3736 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3737 if (rc) {
3738 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3739 goto fail;
3740 }
3741 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3742
3743 atomic_inc(&rdev->mw_count);
3744 return &mw->ib_mw;
3745
3746 fail:
3747 kfree(mw);
3748 return ERR_PTR(rc);
3749 }
3750
3751 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3752 {
3753 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3754 struct bnxt_re_dev *rdev = mw->rdev;
3755 int rc;
3756
3757 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3758 if (rc) {
3759 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3760 return rc;
3761 }
3762
3763 kfree(mw);
3764 atomic_dec(&rdev->mw_count);
3765 return rc;
3766 }
3767
3768
3769 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3770 u64 virt_addr, int mr_access_flags,
3771 struct ib_udata *udata)
3772 {
3773 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3774 struct bnxt_re_dev *rdev = pd->rdev;
3775 struct bnxt_re_mr *mr;
3776 struct ib_umem *umem;
3777 unsigned long page_size;
3778 int umem_pgs, rc;
3779
3780 if (length > BNXT_RE_MAX_MR_SIZE) {
3781 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
3782 length, BNXT_RE_MAX_MR_SIZE);
3783 return ERR_PTR(-ENOMEM);
3784 }
3785
3786 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3787 if (!mr)
3788 return ERR_PTR(-ENOMEM);
3789
3790 mr->rdev = rdev;
3791 mr->qplib_mr.pd = &pd->qplib_pd;
3792 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3793 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3794
3795 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3796 if (rc) {
3797 ibdev_err(&rdev->ibdev, "Failed to allocate MR");
3798 goto free_mr;
3799 }
3800
3801 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3802
3803 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
3804 if (IS_ERR(umem)) {
3805 ibdev_err(&rdev->ibdev, "Failed to get umem");
3806 rc = -EFAULT;
3807 goto free_mrw;
3808 }
3809 mr->ib_umem = umem;
3810
3811 mr->qplib_mr.va = virt_addr;
3812 page_size = ib_umem_find_best_pgsz(
3813 umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
3814 if (!page_size) {
3815 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3816 rc = -EFAULT;
3817 goto free_umem;
3818 }
3819 mr->qplib_mr.total_size = length;
3820
3821 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3822 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
3823 umem_pgs, page_size);
3824 if (rc) {
3825 ibdev_err(&rdev->ibdev, "Failed to register user MR");
3826 goto free_umem;
3827 }
3828
3829 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3830 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3831 atomic_inc(&rdev->mr_count);
3832
3833 return &mr->ib_mr;
3834 free_umem:
3835 ib_umem_release(umem);
3836 free_mrw:
3837 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3838 free_mr:
3839 kfree(mr);
3840 return ERR_PTR(rc);
3841 }
3842
3843 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3844 {
3845 struct ib_device *ibdev = ctx->device;
3846 struct bnxt_re_ucontext *uctx =
3847 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3848 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3849 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3850 struct bnxt_re_uctx_resp resp = {};
3851 u32 chip_met_rev_num = 0;
3852 int rc;
3853
3854 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
3855
3856 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3857 ibdev_dbg(ibdev, "ABI version %u is different from the device's %d",
3858 ibdev->ops.uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3859 return -EPERM;
3860 }
3861
3862 uctx->rdev = rdev;
3863
3864 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3865 if (!uctx->shpg) {
3866 rc = -ENOMEM;
3867 goto fail;
3868 }
3869 spin_lock_init(&uctx->sh_lock);
3870
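/* Pack chip number, revision and metal into the single chip_id0 word
 * shared with the user-space library. */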
3871 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3872 chip_met_rev_num = rdev->chip_ctx->chip_num;
3873 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3874 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3875 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3876 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3877 resp.chip_id0 = chip_met_rev_num;
3878
3879 resp.dev_id = rdev->en_dev->pdev->devfn;
3880 resp.max_qp = rdev->qplib_ctx.qpc_count;
3881 resp.pg_size = PAGE_SIZE;
3882 resp.cqe_sz = sizeof(struct cq_base);
3883 resp.max_cqd = dev_attr->max_cq_wqes;
3884
3885 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
3886 resp.mode = rdev->chip_ctx->modes.wqe_mode;
3887
3888 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3889 if (rc) {
3890 ibdev_err(ibdev, "Failed to copy user context");
3891 rc = -EFAULT;
3892 goto cfail;
3893 }
3894
3895 return 0;
3896 cfail:
3897 free_page((unsigned long)uctx->shpg);
3898 uctx->shpg = NULL;
3899 fail:
3900 return rc;
3901 }
3902
3903 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3904 {
3905 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3906 struct bnxt_re_ucontext,
3907 ib_uctx);
3908
3909 struct bnxt_re_dev *rdev = uctx->rdev;
3910
3911 if (uctx->shpg)
3912 free_page((unsigned long)uctx->shpg);
3913
3914 if (uctx->dpi.dbr) {
3915
3916 /* Release the doorbell page index (DPI) associated with this user
3917  * context, if one was allocated */
3918 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3919 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3920 uctx->dpi.dbr = NULL;
3921 }
3922 }
3923
3924
3925 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3926 {
3927 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3928 struct bnxt_re_ucontext,
3929 ib_uctx);
3930 struct bnxt_re_dev *rdev = uctx->rdev;
3931 u64 pfn;
3932
3933 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3934 return -EINVAL;
3935
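/* A nonzero offset maps the doorbell page as uncached I/O memory;
 * offset zero maps the kernel's shared page. */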
3936 if (vma->vm_pgoff) {
3937 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3938 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3939 PAGE_SIZE, vma->vm_page_prot)) {
3940 ibdev_err(&rdev->ibdev, "Failed to map DPI");
3941 return -EAGAIN;
3942 }
3943 } else {
3944 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3945 if (remap_pfn_range(vma, vma->vm_start,
3946 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3947 ibdev_err(&rdev->ibdev, "Failed to map shared page");
3948 return -EAGAIN;
3949 }
3950 }
3951
3952 return 0;
3953 }