// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <net/addrconf.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

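/*
 * Free callback for user mmap entries: invoked by RDMA core once the
 * last reference to the entry is dropped.
 */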
void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

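/*
 * Map a queue memory region (SQ, RQ, CQ or SRQ array) previously
 * exported via siw_mmap_entry_insert() into user address space.
 * The mmap offset cookie in vma->vm_pgoff identifies the object.
 */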
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv) {
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);
		goto out;
	}
out:
	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}

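/*
 * Set up a user context for a process opening the device and report
 * the device ID back to user space. Bounded by SIW_MAX_CONTEXT open
 * contexts per device.
 */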
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

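/*
 * Report device attributes and limits. The call exchanges no user
 * data, so any non-empty udata is rejected.
 */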
int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
			    sdev->netdev->dev_addr);

	return 0;
}

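/*
 * Report port attributes. Link speed and width are queried from the
 * attached netdev; MTU and port state follow the netdev as well.
 */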
int siw_query_port(struct ib_device *base_dev, u32 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

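/*
 * The 'GID' of an iWARP port is simply the MAC address of the
 * attached netdev; the remaining GID bytes stay zero.
 */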
int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0 */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

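/*
 * PDs carry no device state in siw. Allocation only enforces the
 * SIW_MAX_PD limit via a per-device counter.
 */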
int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
	return 0;
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

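/*
 * Wrap a kernel queue buffer into an rdma_user_mmap_entry and return
 * the mmap offset cookie for user space. On failure, NULL is returned
 * and *offset is left at SIW_INVAL_UOBJ_KEY.
 */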
static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @qp:		Queue pair
 * @attrs:	Initial QP attributes.
 * @udata:	used to provide QP ID, SQ and RQ size back to user.
 */

int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		  struct ib_udata *udata)
{
	struct ib_pd *pd = ibqp->pd;
	struct siw_qp *qp = to_siw_qp(ibqp);
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_atomic;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_atomic;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_atomic;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_atomic;
	}

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_atomic;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);

	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	return 0;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}
	vfree(qp->sendq);
	vfree(qp->recvq);

err_atomic:
	atomic_dec(&sdev->num_qp);
	return rv;
}

/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

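/*
 * Translate the verbs attribute mask into siw QP attributes and apply
 * the change under the QP state lock. Only QP state and access flag
 * changes are evaluated.
 */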
int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

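/*
 * Destroy a QP: fence further async events, drop user mmap entries,
 * move the QP into ERROR state to flush outstanding work, detach the
 * connection endpoint and drop the verbs reference to the QP.
 */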
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers
 * function checks if given buffer addresses and len's are within
 * process context bounds.
 * Data from all provided sge's are copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = max(bytes, 0);
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}

/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	struct siw_sqe sqe = {};
	int rv = 0;

	while (wr) {
		sqe.id = wr->wr_id;
		sqe.opcode = wr->opcode;
		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. we could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}

/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq || qp->attrs.rq_size == 0) {
		*bad_wr = wr;
		return -EINVAL;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}

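/*
 * Release a CQ: flush pending completions, remove the user mmap
 * entry, if any, and free the queue memory.
 */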
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx)
		rdma_user_mmap_entry_remove(cq->cq_entry);

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);
	return 0;
}

/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */

int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (attr->flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d\n", rv);

	if (cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (ctx)
			rdma_user_mmap_entry_remove(cq->cq_entry);
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}

/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @num_cqe:	Maximum number of CQE's to reap.
 * @wc:		Array of work completions to be filled by siw.
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}

/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value > 0 if there were
 *   CQE's not reaped yet and CQ is armed for any of the other
 *   wakeup conditions.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @flags:	Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}

/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
 */
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}

/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd:		Protection Domain
 * @start:	starting address of MR (virtual address)
 * @len:	len of MR
 * @rnic_va:	not used by siw
 * @rights:	MR access rights
 * @udata:	user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}

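/*
 * Allocate a memory region for fast registration (IB_MR_TYPE_MEM_REG
 * only), backed by a page buffer list of up to max_sge entries.
 */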
struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}

/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}

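/*
 * Map a scatterlist into the memory region's page buffer list,
 * merging physically adjacent elements into a single PBL entry.
 */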
int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    num_sle, mem->pbl->max_buf);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			    "sge[%d], size %u, addr 0x%p, total %lu\n",
			    i, pble->size, (void *)(uintptr_t)pble->addr,
			    pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
			    pbl->num_buf);
	}
	return rv;
}

/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	if (rv)
		kfree(mr);

	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}

/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within protection domain given by @pd.
 *
 * @base_srq:	Base SRQ contained in siw SRQ.
 * @init_attrs:	SRQ init attributes.
 * @udata:	points to user context
 */
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (init_attrs->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = true;

	srq->is_kernel_res = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};
		size_t length = srq->num_rqe * sizeof(struct siw_rqe);

		srq->srq_entry =
			siw_mmap_entry_insert(ctx, srq->recvq,
					      length, &uresp.srq_key);
		if (!srq->srq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx)
			rdma_user_mmap_entry_remove(srq->srq_entry);
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}

/*
 * siw_modify_srq()
 *
 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
 */
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = true;
		} else {
			srq->armed = false;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}

/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
 */
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}

/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any
 * QP anymore - the code trusts the RDMA core environment to keep track
 * of QP references.
 */
int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx)
		rdma_user_mmap_entry_remove(srq->srq_entry);
	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);
	return 0;
}

/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq:	Base SRQ contained in siw SRQ
 * @wr:		List of R-WR's
 * @bad_wr:	Updated to failing WR if posting fails.
 */
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->is_kernel_res)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}

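/*
 * Async event helpers: construct an ib_event for the affected object
 * and deliver it to the registered event handler, if any.
 */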
void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = &qp->base_qp;

	/*
	 * Do not report asynchronous errors on QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}

void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}

void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}

void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}