0036 #include <linux/file.h>
0037 #include <linux/fs.h>
0038 #include <linux/slab.h>
0039 #include <linux/sched.h>
0040
0041 #include <linux/uaccess.h>
0042
0043 #include <rdma/uverbs_types.h>
0044 #include <rdma/uverbs_std_types.h>
0045 #include "rdma_core.h"
0046
0047 #include "uverbs.h"
0048 #include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user buffer is larger than
 * 'resp' then it is filled with zeros.
 */
0058 static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
0059 size_t resp_len)
0060 {
0061 int ret;
0062
0063 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
0064 return uverbs_copy_to_struct_or_zero(
0065 attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
0066
0067 if (copy_to_user(attrs->ucore.outbuf, resp,
0068 min(attrs->ucore.outlen, resp_len)))
0069 return -EFAULT;
0070
0071 if (resp_len < attrs->ucore.outlen) {
		/*
		 * Zero fill any extra memory that user space might have
		 * provided.
		 */
0076 ret = clear_user(attrs->ucore.outbuf + resp_len,
0077 attrs->ucore.outlen - resp_len);
0078 if (ret)
0079 return -EFAULT;
0080 }
0081
0082 return 0;
0083 }

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero extended into the 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
0091 static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
0092 size_t req_len)
0093 {
0094 if (copy_from_user(req, attrs->ucore.inbuf,
0095 min(attrs->ucore.inlen, req_len)))
0096 return -EFAULT;
0097
0098 if (attrs->ucore.inlen < req_len) {
0099 memset(req + attrs->ucore.inlen, 0,
0100 req_len - attrs->ucore.inlen);
0101 } else if (attrs->ucore.inlen > req_len) {
0102 if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
0103 attrs->ucore.inlen - req_len))
0104 return -EOPNOTSUPP;
0105 }
0106 return 0;
0107 }

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect which structure members in the response the kernel
 * understood.
 */
0115 static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
0116 size_t resp_len)
0117 {
0118 return min_t(size_t, attrs->ucore.outlen, resp_len);
0119 }

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
0125 struct uverbs_req_iter {
0126 const void __user *cur;
0127 const void __user *end;
0128 };
0129
0130 static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
0131 struct uverbs_req_iter *iter,
0132 void *req,
0133 size_t req_len)
0134 {
0135 if (attrs->ucore.inlen < req_len)
0136 return -ENOSPC;
0137
0138 if (copy_from_user(req, attrs->ucore.inbuf, req_len))
0139 return -EFAULT;
0140
0141 iter->cur = attrs->ucore.inbuf + req_len;
0142 iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
0143 return 0;
0144 }
0145
0146 static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
0147 size_t len)
0148 {
0149 if (iter->cur + len > iter->end)
0150 return -ENOSPC;
0151
0152 if (copy_from_user(val, iter->cur, len))
0153 return -EFAULT;
0154
0155 iter->cur += len;
0156 return 0;
0157 }
0158
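/*
 * Reserve the next 'len' bytes of the request and return a user pointer to
 * them without copying; the caller copies from the returned pointer itself.
 */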
0159 static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
0160 size_t len)
0161 {
0162 const void __user *res = iter->cur;
0163
0164 if (iter->cur + len > iter->end)
0165 return (void __force __user *)ERR_PTR(-ENOSPC);
0166 iter->cur += len;
0167 return res;
0168 }
0169
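/* Any request bytes beyond what this kernel understands must be zero. */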
0170 static int uverbs_request_finish(struct uverbs_req_iter *iter)
0171 {
0172 if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
0173 return -EOPNOTSUPP;
0174 return 0;
0175 }

/*
 * When calling a destroy function during an error unwind we need to pass in
 * a udata that is sanitized of all user arguments, i.e. from the driver's
 * perspective it looks like no udata was supplied.
 */
0182 struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
0183 {
0184 attrs->driver_udata = (struct ib_udata){};
0185 return &attrs->driver_udata;
0186 }
0187
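/*
 * Resolve a completion channel fd into its event file. The fd lookup holds
 * the uobject for read only briefly; a direct uobject reference is taken so
 * the caller can keep using the file after the read lock is dropped.
 */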
0188 static struct ib_uverbs_completion_event_file *
0189 _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
0190 {
0191 struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
0192 fd, attrs);
0193
0194 if (IS_ERR(uobj))
0195 return (void *)uobj;
0196
0197 uverbs_uobject_get(uobj);
0198 uobj_put_read(uobj);
0199
0200 return container_of(uobj, struct ib_uverbs_completion_event_file,
0201 uobj);
0202 }
0203 #define ib_uverbs_lookup_comp_file(_fd, _ufile) \
0204 _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)
0205
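/*
 * Allocate the driver ucontext and its restrack entry. The new context is
 * stored only in the attrs bundle; it does not become visible to the rest
 * of the file until ib_init_ucontext() publishes it.
 */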
0206 int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
0207 {
0208 struct ib_uverbs_file *ufile = attrs->ufile;
0209 struct ib_ucontext *ucontext;
0210 struct ib_device *ib_dev;
0211
0212 ib_dev = srcu_dereference(ufile->device->ib_dev,
0213 &ufile->device->disassociate_srcu);
0214 if (!ib_dev)
0215 return -EIO;
0216
0217 ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
0218 if (!ucontext)
0219 return -ENOMEM;
0220
0221 ucontext->device = ib_dev;
0222 ucontext->ufile = ufile;
0223 xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
0224
0225 rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX);
0226 rdma_restrack_set_name(&ucontext->res, NULL);
0227 attrs->context = ucontext;
0228 return 0;
0229 }
0230
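/*
 * Charge the rdma cgroup, let the driver initialize the ucontext and then
 * publish it in file->ucontext. A file may own at most one ucontext.
 */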
0231 int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
0232 {
0233 struct ib_ucontext *ucontext = attrs->context;
0234 struct ib_uverbs_file *file = attrs->ufile;
0235 int ret;
0236
0237 if (!down_read_trylock(&file->hw_destroy_rwsem))
0238 return -EIO;
0239 mutex_lock(&file->ucontext_lock);
0240 if (file->ucontext) {
0241 ret = -EINVAL;
0242 goto err;
0243 }
0244
0245 ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
0246 RDMACG_RESOURCE_HCA_HANDLE);
0247 if (ret)
0248 goto err;
0249
0250 ret = ucontext->device->ops.alloc_ucontext(ucontext,
0251 &attrs->driver_udata);
0252 if (ret)
0253 goto err_uncharge;
0254
0255 rdma_restrack_add(&ucontext->res);

	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to set up the ucontext have completed.
	 */
0261 smp_store_release(&file->ucontext, ucontext);
0262
0263 mutex_unlock(&file->ucontext_lock);
0264 up_read(&file->hw_destroy_rwsem);
0265 return 0;
0266
0267 err_uncharge:
0268 ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
0269 RDMACG_RESOURCE_HCA_HANDLE);
0270 err:
0271 mutex_unlock(&file->ucontext_lock);
0272 up_read(&file->hw_destroy_rwsem);
0273 return ret;
0274 }
0275
0276 static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
0277 {
0278 struct ib_uverbs_get_context_resp resp;
0279 struct ib_uverbs_get_context cmd;
0280 struct ib_device *ib_dev;
0281 struct ib_uobject *uobj;
0282 int ret;
0283
0284 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0285 if (ret)
0286 return ret;
0287
0288 ret = ib_alloc_ucontext(attrs);
0289 if (ret)
0290 return ret;
0291
0292 uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
0293 if (IS_ERR(uobj)) {
0294 ret = PTR_ERR(uobj);
0295 goto err_ucontext;
0296 }
0297
0298 resp = (struct ib_uverbs_get_context_resp){
0299 .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
0300 .async_fd = uobj->id,
0301 };
0302 ret = uverbs_response(attrs, &resp, sizeof(resp));
0303 if (ret)
0304 goto err_uobj;
0305
0306 ret = ib_init_ucontext(attrs);
0307 if (ret)
0308 goto err_uobj;
0309
0310 ib_uverbs_init_async_event_file(
0311 container_of(uobj, struct ib_uverbs_async_event_file, uobj));
0312 rdma_alloc_commit_uobject(uobj, attrs);
0313 return 0;
0314
0315 err_uobj:
0316 rdma_alloc_abort_uobject(uobj, attrs, false);
0317 err_ucontext:
0318 rdma_restrack_put(&attrs->context->res);
0319 kfree(attrs->context);
0320 attrs->context = NULL;
0321 return ret;
0322 }
0323
0324 static void copy_query_dev_fields(struct ib_ucontext *ucontext,
0325 struct ib_uverbs_query_device_resp *resp,
0326 struct ib_device_attr *attr)
0327 {
0328 struct ib_device *ib_dev = ucontext->device;
0329
0330 resp->fw_ver = attr->fw_ver;
0331 resp->node_guid = ib_dev->node_guid;
0332 resp->sys_image_guid = attr->sys_image_guid;
0333 resp->max_mr_size = attr->max_mr_size;
0334 resp->page_size_cap = attr->page_size_cap;
0335 resp->vendor_id = attr->vendor_id;
0336 resp->vendor_part_id = attr->vendor_part_id;
0337 resp->hw_ver = attr->hw_ver;
0338 resp->max_qp = attr->max_qp;
0339 resp->max_qp_wr = attr->max_qp_wr;
0340 resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
0341 resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge);
0342 resp->max_sge_rd = attr->max_sge_rd;
0343 resp->max_cq = attr->max_cq;
0344 resp->max_cqe = attr->max_cqe;
0345 resp->max_mr = attr->max_mr;
0346 resp->max_pd = attr->max_pd;
0347 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
0348 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
0349 resp->max_res_rd_atom = attr->max_res_rd_atom;
0350 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
0351 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
0352 resp->atomic_cap = attr->atomic_cap;
0353 resp->max_ee = attr->max_ee;
0354 resp->max_rdd = attr->max_rdd;
0355 resp->max_mw = attr->max_mw;
0356 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
0357 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
0358 resp->max_mcast_grp = attr->max_mcast_grp;
0359 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
0360 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
0361 resp->max_ah = attr->max_ah;
0362 resp->max_srq = attr->max_srq;
0363 resp->max_srq_wr = attr->max_srq_wr;
0364 resp->max_srq_sge = attr->max_srq_sge;
0365 resp->max_pkeys = attr->max_pkeys;
0366 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
0367 resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
0368 }
0369
0370 static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
0371 {
0372 struct ib_uverbs_query_device cmd;
0373 struct ib_uverbs_query_device_resp resp;
0374 struct ib_ucontext *ucontext;
0375 int ret;
0376
0377 ucontext = ib_uverbs_get_ucontext(attrs);
0378 if (IS_ERR(ucontext))
0379 return PTR_ERR(ucontext);
0380
0381 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0382 if (ret)
0383 return ret;
0384
0385 memset(&resp, 0, sizeof resp);
0386 copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);
0387
0388 return uverbs_response(attrs, &resp, sizeof(resp));
0389 }
0390
0391 static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
0392 {
0393 struct ib_uverbs_query_port cmd;
0394 struct ib_uverbs_query_port_resp resp;
0395 struct ib_port_attr attr;
0396 int ret;
0397 struct ib_ucontext *ucontext;
0398 struct ib_device *ib_dev;
0399
0400 ucontext = ib_uverbs_get_ucontext(attrs);
0401 if (IS_ERR(ucontext))
0402 return PTR_ERR(ucontext);
0403 ib_dev = ucontext->device;
0404
0405 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0406 if (ret)
0407 return ret;
0408
0409 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
0410 if (ret)
0411 return ret;
0412
0413 memset(&resp, 0, sizeof resp);
0414 copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);
0415
0416 return uverbs_response(attrs, &resp, sizeof(resp));
0417 }
0418
0419 static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
0420 {
0421 struct ib_uverbs_alloc_pd_resp resp = {};
0422 struct ib_uverbs_alloc_pd cmd;
0423 struct ib_uobject *uobj;
0424 struct ib_pd *pd;
0425 int ret;
0426 struct ib_device *ib_dev;
0427
0428 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0429 if (ret)
0430 return ret;
0431
0432 uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
0433 if (IS_ERR(uobj))
0434 return PTR_ERR(uobj);
0435
0436 pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
0437 if (!pd) {
0438 ret = -ENOMEM;
0439 goto err;
0440 }
0441
0442 pd->device = ib_dev;
0443 pd->uobject = uobj;
0444 atomic_set(&pd->usecnt, 0);
0445
0446 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
0447 rdma_restrack_set_name(&pd->res, NULL);
0448
0449 ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
0450 if (ret)
0451 goto err_alloc;
0452 rdma_restrack_add(&pd->res);
0453
0454 uobj->object = pd;
0455 uobj_finalize_uobj_create(uobj, attrs);
0456
0457 resp.pd_handle = uobj->id;
0458 return uverbs_response(attrs, &resp, sizeof(resp));
0459
0460 err_alloc:
0461 rdma_restrack_put(&pd->res);
0462 kfree(pd);
0463 err:
0464 uobj_alloc_abort(uobj, attrs);
0465 return ret;
0466 }
0467
0468 static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
0469 {
0470 struct ib_uverbs_dealloc_pd cmd;
0471 int ret;
0472
0473 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0474 if (ret)
0475 return ret;
0476
0477 return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
0478 }
0479
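/*
 * XRC domains opened through an inode are shared: each uverbs device keeps
 * an rbtree of inode -> ib_xrcd mappings so that a second open of the same
 * file reuses the existing XRCD instead of allocating a new one.
 */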
0480 struct xrcd_table_entry {
0481 struct rb_node node;
0482 struct ib_xrcd *xrcd;
0483 struct inode *inode;
0484 };
0485
0486 static int xrcd_table_insert(struct ib_uverbs_device *dev,
0487 struct inode *inode,
0488 struct ib_xrcd *xrcd)
0489 {
0490 struct xrcd_table_entry *entry, *scan;
0491 struct rb_node **p = &dev->xrcd_tree.rb_node;
0492 struct rb_node *parent = NULL;
0493
0494 entry = kmalloc(sizeof *entry, GFP_KERNEL);
0495 if (!entry)
0496 return -ENOMEM;
0497
0498 entry->xrcd = xrcd;
0499 entry->inode = inode;
0500
0501 while (*p) {
0502 parent = *p;
0503 scan = rb_entry(parent, struct xrcd_table_entry, node);
0504
0505 if (inode < scan->inode) {
0506 p = &(*p)->rb_left;
0507 } else if (inode > scan->inode) {
0508 p = &(*p)->rb_right;
0509 } else {
0510 kfree(entry);
0511 return -EEXIST;
0512 }
0513 }
0514
0515 rb_link_node(&entry->node, parent, p);
0516 rb_insert_color(&entry->node, &dev->xrcd_tree);
0517 igrab(inode);
0518 return 0;
0519 }
0520
0521 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
0522 struct inode *inode)
0523 {
0524 struct xrcd_table_entry *entry;
0525 struct rb_node *p = dev->xrcd_tree.rb_node;
0526
0527 while (p) {
0528 entry = rb_entry(p, struct xrcd_table_entry, node);
0529
0530 if (inode < entry->inode)
0531 p = p->rb_left;
0532 else if (inode > entry->inode)
0533 p = p->rb_right;
0534 else
0535 return entry;
0536 }
0537
0538 return NULL;
0539 }
0540
0541 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
0542 {
0543 struct xrcd_table_entry *entry;
0544
0545 entry = xrcd_table_search(dev, inode);
0546 if (!entry)
0547 return NULL;
0548
0549 return entry->xrcd;
0550 }
0551
0552 static void xrcd_table_delete(struct ib_uverbs_device *dev,
0553 struct inode *inode)
0554 {
0555 struct xrcd_table_entry *entry;
0556
0557 entry = xrcd_table_search(dev, inode);
0558 if (entry) {
0559 iput(inode);
0560 rb_erase(&entry->node, &dev->xrcd_tree);
0561 kfree(entry);
0562 }
0563 }
0564
0565 static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
0566 {
0567 struct ib_uverbs_device *ibudev = attrs->ufile->device;
0568 struct ib_uverbs_open_xrcd_resp resp = {};
0569 struct ib_uverbs_open_xrcd cmd;
0570 struct ib_uxrcd_object *obj;
0571 struct ib_xrcd *xrcd = NULL;
0572 struct inode *inode = NULL;
0573 int new_xrcd = 0;
0574 struct ib_device *ib_dev;
0575 struct fd f = {};
0576 int ret;
0577
0578 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0579 if (ret)
0580 return ret;
0581
0582 mutex_lock(&ibudev->xrcd_tree_mutex);
0583
0584 if (cmd.fd != -1) {
		/* search for file descriptor */
0586 f = fdget(cmd.fd);
0587 if (!f.file) {
0588 ret = -EBADF;
0589 goto err_tree_mutex_unlock;
0590 }
0591
0592 inode = file_inode(f.file);
0593 xrcd = find_xrcd(ibudev, inode);
0594 if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
0596 ret = -EAGAIN;
0597 goto err_tree_mutex_unlock;
0598 }
0599
0600 if (xrcd && cmd.oflags & O_EXCL) {
0601 ret = -EINVAL;
0602 goto err_tree_mutex_unlock;
0603 }
0604 }
0605
0606 obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
0607 &ib_dev);
0608 if (IS_ERR(obj)) {
0609 ret = PTR_ERR(obj);
0610 goto err_tree_mutex_unlock;
0611 }
0612
0613 if (!xrcd) {
0614 xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
0615 if (IS_ERR(xrcd)) {
0616 ret = PTR_ERR(xrcd);
0617 goto err;
0618 }
0619 new_xrcd = 1;
0620 }
0621
0622 atomic_set(&obj->refcnt, 0);
0623 obj->uobject.object = xrcd;
0624
0625 if (inode) {
0626 if (new_xrcd) {
			/* create new inode/xrcd table entry */
0628 ret = xrcd_table_insert(ibudev, inode, xrcd);
0629 if (ret)
0630 goto err_dealloc_xrcd;
0631 }
0632 atomic_inc(&xrcd->usecnt);
0633 }
0634
0635 if (f.file)
0636 fdput(f);
0637
0638 mutex_unlock(&ibudev->xrcd_tree_mutex);
0639 uobj_finalize_uobj_create(&obj->uobject, attrs);
0640
0641 resp.xrcd_handle = obj->uobject.id;
0642 return uverbs_response(attrs, &resp, sizeof(resp));
0643
0644 err_dealloc_xrcd:
0645 ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs));
0646
0647 err:
0648 uobj_alloc_abort(&obj->uobject, attrs);
0649
0650 err_tree_mutex_unlock:
0651 if (f.file)
0652 fdput(f);
0653
0654 mutex_unlock(&ibudev->xrcd_tree_mutex);
0655
0656 return ret;
0657 }
0658
0659 static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
0660 {
0661 struct ib_uverbs_close_xrcd cmd;
0662 int ret;
0663
0664 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0665 if (ret)
0666 return ret;
0667
0668 return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
0669 }
0670
0671 int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
0672 enum rdma_remove_reason why,
0673 struct uverbs_attr_bundle *attrs)
0674 {
0675 struct inode *inode;
0676 int ret;
0677 struct ib_uverbs_device *dev = attrs->ufile->device;
0678
0679 inode = xrcd->inode;
0680 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
0681 return 0;
0682
0683 ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);
0684 if (ret) {
0685 atomic_inc(&xrcd->usecnt);
0686 return ret;
0687 }
0688
0689 if (inode)
0690 xrcd_table_delete(dev, inode);
0691
0692 return 0;
0693 }
0694
0695 static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
0696 {
0697 struct ib_uverbs_reg_mr_resp resp = {};
0698 struct ib_uverbs_reg_mr cmd;
0699 struct ib_uobject *uobj;
0700 struct ib_pd *pd;
0701 struct ib_mr *mr;
0702 int ret;
0703 struct ib_device *ib_dev;
0704
0705 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0706 if (ret)
0707 return ret;
0708
0709 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
0710 return -EINVAL;
0711
0712 uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
0713 if (IS_ERR(uobj))
0714 return PTR_ERR(uobj);
0715
0716 ret = ib_check_mr_access(ib_dev, cmd.access_flags);
0717 if (ret)
0718 goto err_free;
0719
0720 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
0721 if (!pd) {
0722 ret = -EINVAL;
0723 goto err_free;
0724 }
0725
0726 mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
0727 cmd.access_flags,
0728 &attrs->driver_udata);
0729 if (IS_ERR(mr)) {
0730 ret = PTR_ERR(mr);
0731 goto err_put;
0732 }
0733
0734 mr->device = pd->device;
0735 mr->pd = pd;
0736 mr->type = IB_MR_TYPE_USER;
0737 mr->dm = NULL;
0738 mr->sig_attrs = NULL;
0739 mr->uobject = uobj;
0740 atomic_inc(&pd->usecnt);
0741 mr->iova = cmd.hca_va;
0742
0743 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
0744 rdma_restrack_set_name(&mr->res, NULL);
0745 rdma_restrack_add(&mr->res);
0746
0747 uobj->object = mr;
0748 uobj_put_obj_read(pd);
0749 uobj_finalize_uobj_create(uobj, attrs);
0750
0751 resp.lkey = mr->lkey;
0752 resp.rkey = mr->rkey;
0753 resp.mr_handle = uobj->id;
0754 return uverbs_response(attrs, &resp, sizeof(resp));
0755
0756 err_put:
0757 uobj_put_obj_read(pd);
0758 err_free:
0759 uobj_alloc_abort(uobj, attrs);
0760 return ret;
0761 }
0762
0763 static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
0764 {
0765 struct ib_uverbs_rereg_mr cmd;
0766 struct ib_uverbs_rereg_mr_resp resp;
0767 struct ib_mr *mr;
0768 int ret;
0769 struct ib_uobject *uobj;
0770 struct ib_uobject *new_uobj;
0771 struct ib_device *ib_dev;
0772 struct ib_pd *orig_pd;
0773 struct ib_pd *new_pd;
0774 struct ib_mr *new_mr;
0775
0776 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0777 if (ret)
0778 return ret;
0779
0780 if (!cmd.flags)
0781 return -EINVAL;
0782
0783 if (cmd.flags & ~IB_MR_REREG_SUPPORTED)
0784 return -EOPNOTSUPP;
0785
0786 if ((cmd.flags & IB_MR_REREG_TRANS) &&
0787 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
0788 return -EINVAL;
0789
0790 uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
0791 if (IS_ERR(uobj))
0792 return PTR_ERR(uobj);
0793
0794 mr = uobj->object;
0795
0796 if (mr->dm) {
0797 ret = -EINVAL;
0798 goto put_uobjs;
0799 }
0800
0801 if (cmd.flags & IB_MR_REREG_ACCESS) {
0802 ret = ib_check_mr_access(mr->device, cmd.access_flags);
0803 if (ret)
0804 goto put_uobjs;
0805 }
0806
0807 orig_pd = mr->pd;
0808 if (cmd.flags & IB_MR_REREG_PD) {
0809 new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
0810 attrs);
0811 if (!new_pd) {
0812 ret = -EINVAL;
0813 goto put_uobjs;
0814 }
0815 } else {
0816 new_pd = mr->pd;
0817 }

	/*
	 * The driver might create a new HW object as part of the rereg, so we
	 * need a uobject ready to hold it.
	 */
0823 new_uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
0824 if (IS_ERR(new_uobj)) {
0825 ret = PTR_ERR(new_uobj);
0826 goto put_uobj_pd;
0827 }
0828
0829 new_mr = ib_dev->ops.rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
0830 cmd.hca_va, cmd.access_flags, new_pd,
0831 &attrs->driver_udata);
0832 if (IS_ERR(new_mr)) {
0833 ret = PTR_ERR(new_mr);
0834 goto put_new_uobj;
0835 }
0836 if (new_mr) {
0837 new_mr->device = new_pd->device;
0838 new_mr->pd = new_pd;
0839 new_mr->type = IB_MR_TYPE_USER;
0840 new_mr->uobject = uobj;
0841 atomic_inc(&new_pd->usecnt);
0842 new_uobj->object = new_mr;
0843
0844 rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
0845 rdma_restrack_set_name(&new_mr->res, NULL);
0846 rdma_restrack_add(&new_mr->res);

		/*
		 * The new uobj for the new HW object is put into the same spot
		 * in the IDR and the old uobj and HW object are deleted.
		 */
0852 rdma_assign_uobject(uobj, new_uobj, attrs);
0853 rdma_alloc_commit_uobject(new_uobj, attrs);
0854 uobj_put_destroy(uobj);
0855 new_uobj = NULL;
0856 uobj = NULL;
0857 mr = new_mr;
0858 } else {
0859 if (cmd.flags & IB_MR_REREG_PD) {
0860 atomic_dec(&orig_pd->usecnt);
0861 mr->pd = new_pd;
0862 atomic_inc(&new_pd->usecnt);
0863 }
0864 if (cmd.flags & IB_MR_REREG_TRANS)
0865 mr->iova = cmd.hca_va;
0866 }
0867
0868 memset(&resp, 0, sizeof(resp));
0869 resp.lkey = mr->lkey;
0870 resp.rkey = mr->rkey;
0871
0872 ret = uverbs_response(attrs, &resp, sizeof(resp));
0873
0874 put_new_uobj:
0875 if (new_uobj)
0876 uobj_alloc_abort(new_uobj, attrs);
0877 put_uobj_pd:
0878 if (cmd.flags & IB_MR_REREG_PD)
0879 uobj_put_obj_read(new_pd);
0880
0881 put_uobjs:
0882 if (uobj)
0883 uobj_put_write(uobj);
0884
0885 return ret;
0886 }
0887
0888 static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
0889 {
0890 struct ib_uverbs_dereg_mr cmd;
0891 int ret;
0892
0893 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0894 if (ret)
0895 return ret;
0896
0897 return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
0898 }
0899
0900 static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
0901 {
0902 struct ib_uverbs_alloc_mw cmd;
0903 struct ib_uverbs_alloc_mw_resp resp = {};
0904 struct ib_uobject *uobj;
0905 struct ib_pd *pd;
0906 struct ib_mw *mw;
0907 int ret;
0908 struct ib_device *ib_dev;
0909
0910 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0911 if (ret)
0912 return ret;
0913
0914 uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
0915 if (IS_ERR(uobj))
0916 return PTR_ERR(uobj);
0917
0918 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
0919 if (!pd) {
0920 ret = -EINVAL;
0921 goto err_free;
0922 }
0923
0924 if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
0925 ret = -EINVAL;
0926 goto err_put;
0927 }
0928
0929 mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
0930 if (!mw) {
0931 ret = -ENOMEM;
0932 goto err_put;
0933 }
0934
0935 mw->device = ib_dev;
0936 mw->pd = pd;
0937 mw->uobject = uobj;
0938 mw->type = cmd.mw_type;
0939
0940 ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
0941 if (ret)
0942 goto err_alloc;
0943
0944 atomic_inc(&pd->usecnt);
0945
0946 uobj->object = mw;
0947 uobj_put_obj_read(pd);
0948 uobj_finalize_uobj_create(uobj, attrs);
0949
0950 resp.rkey = mw->rkey;
0951 resp.mw_handle = uobj->id;
0952 return uverbs_response(attrs, &resp, sizeof(resp));
0953
0954 err_alloc:
0955 kfree(mw);
0956 err_put:
0957 uobj_put_obj_read(pd);
0958 err_free:
0959 uobj_alloc_abort(uobj, attrs);
0960 return ret;
0961 }
0962
0963 static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
0964 {
0965 struct ib_uverbs_dealloc_mw cmd;
0966 int ret;
0967
0968 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0969 if (ret)
0970 return ret;
0971
0972 return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
0973 }
0974
0975 static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
0976 {
0977 struct ib_uverbs_create_comp_channel cmd;
0978 struct ib_uverbs_create_comp_channel_resp resp;
0979 struct ib_uobject *uobj;
0980 struct ib_uverbs_completion_event_file *ev_file;
0981 struct ib_device *ib_dev;
0982 int ret;
0983
0984 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
0985 if (ret)
0986 return ret;
0987
0988 uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
0989 if (IS_ERR(uobj))
0990 return PTR_ERR(uobj);
0991
0992 ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
0993 uobj);
0994 ib_uverbs_init_event_queue(&ev_file->ev_queue);
0995 uobj_finalize_uobj_create(uobj, attrs);
0996
0997 resp.fd = uobj->id;
0998 return uverbs_response(attrs, &resp, sizeof(resp));
0999 }
1000
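/*
 * Common implementation of the plain and extended create_cq commands. If a
 * completion channel fd was supplied it is resolved first so completion
 * events can be delivered to its event queue.
 */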
1001 static int create_cq(struct uverbs_attr_bundle *attrs,
1002 struct ib_uverbs_ex_create_cq *cmd)
1003 {
1004 struct ib_ucq_object *obj;
1005 struct ib_uverbs_completion_event_file *ev_file = NULL;
1006 struct ib_cq *cq;
1007 int ret;
1008 struct ib_uverbs_ex_create_cq_resp resp = {};
1009 struct ib_cq_init_attr attr = {};
1010 struct ib_device *ib_dev;
1011
1012 if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
1013 return -EINVAL;
1014
1015 obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
1016 &ib_dev);
1017 if (IS_ERR(obj))
1018 return PTR_ERR(obj);
1019
1020 if (cmd->comp_channel >= 0) {
1021 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
1022 if (IS_ERR(ev_file)) {
1023 ret = PTR_ERR(ev_file);
1024 goto err;
1025 }
1026 }
1027
1028 obj->uevent.uobject.user_handle = cmd->user_handle;
1029 INIT_LIST_HEAD(&obj->comp_list);
1030 INIT_LIST_HEAD(&obj->uevent.event_list);
1031
1032 attr.cqe = cmd->cqe;
1033 attr.comp_vector = cmd->comp_vector;
1034 attr.flags = cmd->flags;
1035
1036 cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
1037 if (!cq) {
1038 ret = -ENOMEM;
1039 goto err_file;
1040 }
1041 cq->device = ib_dev;
1042 cq->uobject = obj;
1043 cq->comp_handler = ib_uverbs_comp_handler;
1044 cq->event_handler = ib_uverbs_cq_event_handler;
1045 cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
1046 atomic_set(&cq->usecnt, 0);
1047
1048 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
1049 rdma_restrack_set_name(&cq->res, NULL);
1050
1051 ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
1052 if (ret)
1053 goto err_free;
1054 rdma_restrack_add(&cq->res);
1055
1056 obj->uevent.uobject.object = cq;
1057 obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
1058 if (obj->uevent.event_file)
1059 uverbs_uobject_get(&obj->uevent.event_file->uobj);
1060 uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
1061
1062 resp.base.cq_handle = obj->uevent.uobject.id;
1063 resp.base.cqe = cq->cqe;
1064 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
1065 return uverbs_response(attrs, &resp, sizeof(resp));
1066
1067 err_free:
1068 rdma_restrack_put(&cq->res);
1069 kfree(cq);
1070 err_file:
1071 if (ev_file)
1072 ib_uverbs_release_ucq(ev_file, obj);
1073 err:
1074 uobj_alloc_abort(&obj->uevent.uobject, attrs);
1075 return ret;
1076 }
1077
1078 static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
1079 {
1080 struct ib_uverbs_create_cq cmd;
1081 struct ib_uverbs_ex_create_cq cmd_ex;
1082 int ret;
1083
1084 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1085 if (ret)
1086 return ret;
1087
1088 memset(&cmd_ex, 0, sizeof(cmd_ex));
1089 cmd_ex.user_handle = cmd.user_handle;
1090 cmd_ex.cqe = cmd.cqe;
1091 cmd_ex.comp_vector = cmd.comp_vector;
1092 cmd_ex.comp_channel = cmd.comp_channel;
1093
1094 return create_cq(attrs, &cmd_ex);
1095 }
1096
1097 static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
1098 {
1099 struct ib_uverbs_ex_create_cq cmd;
1100 int ret;
1101
1102 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1103 if (ret)
1104 return ret;
1105
1106 if (cmd.comp_mask)
1107 return -EINVAL;
1108
1109 if (cmd.reserved)
1110 return -EINVAL;
1111
1112 return create_cq(attrs, &cmd);
1113 }
1114
1115 static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
1116 {
1117 struct ib_uverbs_resize_cq cmd;
1118 struct ib_uverbs_resize_cq_resp resp = {};
1119 struct ib_cq *cq;
1120 int ret;
1121
1122 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1123 if (ret)
1124 return ret;
1125
1126 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1127 if (!cq)
1128 return -EINVAL;
1129
1130 ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
1131 if (ret)
1132 goto out;
1133
1134 resp.cqe = cq->cqe;
1135
1136 ret = uverbs_response(attrs, &resp, sizeof(resp));
1137 out:
1138 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1139 UVERBS_LOOKUP_READ);
1140
1141 return ret;
1142 }
1143
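/*
 * Translate a kernel ib_wc into the fixed-layout ib_uverbs_wc expected by
 * user space. On OPA-capable ports the 32-bit LID is folded back into the
 * 16-bit field of the uverbs ABI.
 */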
1144 static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
1145 struct ib_wc *wc)
1146 {
1147 struct ib_uverbs_wc tmp;
1148
1149 tmp.wr_id = wc->wr_id;
1150 tmp.status = wc->status;
1151 tmp.opcode = wc->opcode;
1152 tmp.vendor_err = wc->vendor_err;
1153 tmp.byte_len = wc->byte_len;
1154 tmp.ex.imm_data = wc->ex.imm_data;
1155 tmp.qp_num = wc->qp->qp_num;
1156 tmp.src_qp = wc->src_qp;
1157 tmp.wc_flags = wc->wc_flags;
1158 tmp.pkey_index = wc->pkey_index;
1159 if (rdma_cap_opa_ah(ib_dev, wc->port_num))
1160 tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
1161 else
1162 tmp.slid = ib_lid_cpu16(wc->slid);
1163 tmp.sl = wc->sl;
1164 tmp.dlid_path_bits = wc->dlid_path_bits;
1165 tmp.port_num = wc->port_num;
1166 tmp.reserved = 0;
1167
1168 if (copy_to_user(dest, &tmp, sizeof tmp))
1169 return -EFAULT;
1170
1171 return 0;
1172 }
1173
1174 static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
1175 {
1176 struct ib_uverbs_poll_cq cmd;
1177 struct ib_uverbs_poll_cq_resp resp;
1178 u8 __user *header_ptr;
1179 u8 __user *data_ptr;
1180 struct ib_cq *cq;
1181 struct ib_wc wc;
1182 int ret;
1183
1184 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1185 if (ret)
1186 return ret;
1187
1188 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1189 if (!cq)
1190 return -EINVAL;
1191
	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
1193 header_ptr = attrs->ucore.outbuf;
1194 data_ptr = header_ptr + sizeof resp;
1195
1196 memset(&resp, 0, sizeof resp);
1197 while (resp.count < cmd.ne) {
1198 ret = ib_poll_cq(cq, 1, &wc);
1199 if (ret < 0)
1200 goto out_put;
1201 if (!ret)
1202 break;
1203
1204 ret = copy_wc_to_user(cq->device, data_ptr, &wc);
1205 if (ret)
1206 goto out_put;
1207
1208 data_ptr += sizeof(struct ib_uverbs_wc);
1209 ++resp.count;
1210 }
1211
1212 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1213 ret = -EFAULT;
1214 goto out_put;
1215 }
1216 ret = 0;
1217
1218 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
1219 ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
1220
1221 out_put:
1222 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1223 UVERBS_LOOKUP_READ);
1224 return ret;
1225 }
1226
1227 static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
1228 {
1229 struct ib_uverbs_req_notify_cq cmd;
1230 struct ib_cq *cq;
1231 int ret;
1232
1233 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1234 if (ret)
1235 return ret;
1236
1237 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1238 if (!cq)
1239 return -EINVAL;
1240
1241 ib_req_notify_cq(cq, cmd.solicited_only ?
1242 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1243
1244 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
1245 UVERBS_LOOKUP_READ);
1246 return 0;
1247 }
1248
1249 static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
1250 {
1251 struct ib_uverbs_destroy_cq cmd;
1252 struct ib_uverbs_destroy_cq_resp resp;
1253 struct ib_uobject *uobj;
1254 struct ib_ucq_object *obj;
1255 int ret;
1256
1257 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1258 if (ret)
1259 return ret;
1260
1261 uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
1262 if (IS_ERR(uobj))
1263 return PTR_ERR(uobj);
1264
1265 obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
1266 memset(&resp, 0, sizeof(resp));
1267 resp.comp_events_reported = obj->comp_events_reported;
1268 resp.async_events_reported = obj->uevent.events_reported;
1269
1270 uobj_put_destroy(uobj);
1271
1272 return uverbs_response(attrs, &resp, sizeof(resp));
1273 }
1274
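/*
 * Common implementation of the plain and extended create_qp commands. The
 * handles in the command may refer to a PD, CQs, an SRQ, an XRCD or an RWQ
 * indirection table depending on the QP type; each object is held with a
 * read reference only for the duration of the call.
 */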
1275 static int create_qp(struct uverbs_attr_bundle *attrs,
1276 struct ib_uverbs_ex_create_qp *cmd)
1277 {
1278 struct ib_uqp_object *obj;
1279 struct ib_device *device;
1280 struct ib_pd *pd = NULL;
1281 struct ib_xrcd *xrcd = NULL;
1282 struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
1283 struct ib_cq *scq = NULL, *rcq = NULL;
1284 struct ib_srq *srq = NULL;
1285 struct ib_qp *qp;
1286 struct ib_qp_init_attr attr = {};
1287 struct ib_uverbs_ex_create_qp_resp resp = {};
1288 int ret;
1289 struct ib_rwq_ind_table *ind_tbl = NULL;
1290 bool has_sq = true;
1291 struct ib_device *ib_dev;
1292
1293 switch (cmd->qp_type) {
1294 case IB_QPT_RAW_PACKET:
1295 if (!capable(CAP_NET_RAW))
1296 return -EPERM;
1297 break;
1298 case IB_QPT_RC:
1299 case IB_QPT_UC:
1300 case IB_QPT_UD:
1301 case IB_QPT_XRC_INI:
1302 case IB_QPT_XRC_TGT:
1303 case IB_QPT_DRIVER:
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1308
1309 obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1310 &ib_dev);
1311 if (IS_ERR(obj))
1312 return PTR_ERR(obj);
1313 obj->uxrcd = NULL;
1314 obj->uevent.uobject.user_handle = cmd->user_handle;
1315 mutex_init(&obj->mcast_lock);
1316
1317 if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
1318 ind_tbl = uobj_get_obj_read(rwq_ind_table,
1319 UVERBS_OBJECT_RWQ_IND_TBL,
1320 cmd->rwq_ind_tbl_handle, attrs);
1321 if (!ind_tbl) {
1322 ret = -EINVAL;
1323 goto err_put;
1324 }
1325
1326 attr.rwq_ind_tbl = ind_tbl;
1327 }
1328
1329 if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
1330 ret = -EINVAL;
1331 goto err_put;
1332 }
1333
1334 if (ind_tbl && !cmd->max_send_wr)
1335 has_sq = false;
1336
1337 if (cmd->qp_type == IB_QPT_XRC_TGT) {
1338 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
1339 attrs);
1340
1341 if (IS_ERR(xrcd_uobj)) {
1342 ret = -EINVAL;
1343 goto err_put;
1344 }
1345
1346 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1347 if (!xrcd) {
1348 ret = -EINVAL;
1349 goto err_put;
1350 }
1351 device = xrcd->device;
1352 } else {
1353 if (cmd->qp_type == IB_QPT_XRC_INI) {
1354 cmd->max_recv_wr = 0;
1355 cmd->max_recv_sge = 0;
1356 } else {
1357 if (cmd->is_srq) {
1358 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
1359 cmd->srq_handle, attrs);
1360 if (!srq || srq->srq_type == IB_SRQT_XRC) {
1361 ret = -EINVAL;
1362 goto err_put;
1363 }
1364 }
1365
1366 if (!ind_tbl) {
1367 if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1368 rcq = uobj_get_obj_read(
1369 cq, UVERBS_OBJECT_CQ,
1370 cmd->recv_cq_handle, attrs);
1371 if (!rcq) {
1372 ret = -EINVAL;
1373 goto err_put;
1374 }
1375 }
1376 }
1377 }
1378
1379 if (has_sq)
1380 scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
1381 cmd->send_cq_handle, attrs);
1382 if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
1383 rcq = rcq ?: scq;
1384 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
1385 attrs);
1386 if (!pd || (!scq && has_sq)) {
1387 ret = -EINVAL;
1388 goto err_put;
1389 }
1390
1391 device = pd->device;
1392 }
1393
1394 attr.event_handler = ib_uverbs_qp_event_handler;
1395 attr.send_cq = scq;
1396 attr.recv_cq = rcq;
1397 attr.srq = srq;
1398 attr.xrcd = xrcd;
1399 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1400 IB_SIGNAL_REQ_WR;
1401 attr.qp_type = cmd->qp_type;
1402
1403 attr.cap.max_send_wr = cmd->max_send_wr;
1404 attr.cap.max_recv_wr = cmd->max_recv_wr;
1405 attr.cap.max_send_sge = cmd->max_send_sge;
1406 attr.cap.max_recv_sge = cmd->max_recv_sge;
1407 attr.cap.max_inline_data = cmd->max_inline_data;
1408
1409 INIT_LIST_HEAD(&obj->uevent.event_list);
1410 INIT_LIST_HEAD(&obj->mcast_list);
1411
1412 attr.create_flags = cmd->create_flags;
1413 if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1414 IB_QP_CREATE_CROSS_CHANNEL |
1415 IB_QP_CREATE_MANAGED_SEND |
1416 IB_QP_CREATE_MANAGED_RECV |
1417 IB_QP_CREATE_SCATTER_FCS |
1418 IB_QP_CREATE_CVLAN_STRIPPING |
1419 IB_QP_CREATE_SOURCE_QPN |
1420 IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
1421 ret = -EINVAL;
1422 goto err_put;
1423 }
1424
1425 if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
1426 if (!capable(CAP_NET_RAW)) {
1427 ret = -EPERM;
1428 goto err_put;
1429 }
1430
1431 attr.source_qpn = cmd->source_qpn;
1432 }
1433
1434 qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
1435 KBUILD_MODNAME);
1436 if (IS_ERR(qp)) {
1437 ret = PTR_ERR(qp);
1438 goto err_put;
1439 }
1440 ib_qp_usecnt_inc(qp);
1441
1442 obj->uevent.uobject.object = qp;
1443 obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
1444 if (obj->uevent.event_file)
1445 uverbs_uobject_get(&obj->uevent.event_file->uobj);
1446
1447 if (xrcd) {
1448 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1449 uobject);
1450 atomic_inc(&obj->uxrcd->refcnt);
1451 uobj_put_read(xrcd_uobj);
1452 }
1453
1454 if (pd)
1455 uobj_put_obj_read(pd);
1456 if (scq)
1457 rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1458 UVERBS_LOOKUP_READ);
1459 if (rcq && rcq != scq)
1460 rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1461 UVERBS_LOOKUP_READ);
1462 if (srq)
1463 rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1464 UVERBS_LOOKUP_READ);
1465 if (ind_tbl)
1466 uobj_put_obj_read(ind_tbl);
1467 uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
1468
1469 resp.base.qpn = qp->qp_num;
1470 resp.base.qp_handle = obj->uevent.uobject.id;
1471 resp.base.max_recv_sge = attr.cap.max_recv_sge;
1472 resp.base.max_send_sge = attr.cap.max_send_sge;
1473 resp.base.max_recv_wr = attr.cap.max_recv_wr;
1474 resp.base.max_send_wr = attr.cap.max_send_wr;
1475 resp.base.max_inline_data = attr.cap.max_inline_data;
1476 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
1477 return uverbs_response(attrs, &resp, sizeof(resp));
1478
1479 err_put:
1480 if (!IS_ERR(xrcd_uobj))
1481 uobj_put_read(xrcd_uobj);
1482 if (pd)
1483 uobj_put_obj_read(pd);
1484 if (scq)
1485 rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1486 UVERBS_LOOKUP_READ);
1487 if (rcq && rcq != scq)
1488 rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1489 UVERBS_LOOKUP_READ);
1490 if (srq)
1491 rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1492 UVERBS_LOOKUP_READ);
1493 if (ind_tbl)
1494 uobj_put_obj_read(ind_tbl);
1495
1496 uobj_alloc_abort(&obj->uevent.uobject, attrs);
1497 return ret;
1498 }
1499
1500 static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
1501 {
1502 struct ib_uverbs_create_qp cmd;
1503 struct ib_uverbs_ex_create_qp cmd_ex;
1504 int ret;
1505
1506 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1507 if (ret)
1508 return ret;
1509
1510 memset(&cmd_ex, 0, sizeof(cmd_ex));
1511 cmd_ex.user_handle = cmd.user_handle;
1512 cmd_ex.pd_handle = cmd.pd_handle;
1513 cmd_ex.send_cq_handle = cmd.send_cq_handle;
1514 cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1515 cmd_ex.srq_handle = cmd.srq_handle;
1516 cmd_ex.max_send_wr = cmd.max_send_wr;
1517 cmd_ex.max_recv_wr = cmd.max_recv_wr;
1518 cmd_ex.max_send_sge = cmd.max_send_sge;
1519 cmd_ex.max_recv_sge = cmd.max_recv_sge;
1520 cmd_ex.max_inline_data = cmd.max_inline_data;
1521 cmd_ex.sq_sig_all = cmd.sq_sig_all;
1522 cmd_ex.qp_type = cmd.qp_type;
1523 cmd_ex.is_srq = cmd.is_srq;
1524
1525 return create_qp(attrs, &cmd_ex);
1526 }
1527
1528 static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
1529 {
1530 struct ib_uverbs_ex_create_qp cmd;
1531 int ret;
1532
1533 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1534 if (ret)
1535 return ret;
1536
1537 if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
1538 return -EINVAL;
1539
1540 if (cmd.reserved)
1541 return -EINVAL;
1542
1543 return create_qp(attrs, &cmd);
1544 }
1545
1546 static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
1547 {
1548 struct ib_uverbs_create_qp_resp resp = {};
1549 struct ib_uverbs_open_qp cmd;
1550 struct ib_uqp_object *obj;
1551 struct ib_xrcd *xrcd;
1552 struct ib_qp *qp;
1553 struct ib_qp_open_attr attr = {};
1554 int ret;
1555 struct ib_uobject *xrcd_uobj;
1556 struct ib_device *ib_dev;
1557
1558 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1559 if (ret)
1560 return ret;
1561
1562 obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1563 &ib_dev);
1564 if (IS_ERR(obj))
1565 return PTR_ERR(obj);
1566
1567 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
1568 if (IS_ERR(xrcd_uobj)) {
1569 ret = -EINVAL;
1570 goto err_put;
1571 }
1572
1573 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1574 if (!xrcd) {
1575 ret = -EINVAL;
1576 goto err_xrcd;
1577 }
1578
1579 attr.event_handler = ib_uverbs_qp_event_handler;
1580 attr.qp_num = cmd.qpn;
1581 attr.qp_type = cmd.qp_type;
1582
1583 INIT_LIST_HEAD(&obj->uevent.event_list);
1584 INIT_LIST_HEAD(&obj->mcast_list);
1585
1586 qp = ib_open_qp(xrcd, &attr);
1587 if (IS_ERR(qp)) {
1588 ret = PTR_ERR(qp);
1589 goto err_xrcd;
1590 }
1591
1592 obj->uevent.uobject.object = qp;
1593 obj->uevent.uobject.user_handle = cmd.user_handle;
1594
1595 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1596 atomic_inc(&obj->uxrcd->refcnt);
1597 qp->uobject = obj;
1598 uobj_put_read(xrcd_uobj);
1599 uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
1600
1601 resp.qpn = qp->qp_num;
1602 resp.qp_handle = obj->uevent.uobject.id;
1603 return uverbs_response(attrs, &resp, sizeof(resp));
1604
1605 err_xrcd:
1606 uobj_put_read(xrcd_uobj);
1607 err_put:
1608 uobj_alloc_abort(&obj->uevent.uobject, attrs);
1609 return ret;
1610 }
1611
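/*
 * Flatten an rdma_ah_attr into the uverbs wire format. The GRH fields are
 * only meaningful when the address handle is global.
 */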
1612 static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1613 struct rdma_ah_attr *rdma_attr)
1614 {
1615 const struct ib_global_route *grh;
1616
1617 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
1618 uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
1619 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
1620 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
1621 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
1622 IB_AH_GRH);
1623 if (uverb_attr->is_global) {
1624 grh = rdma_ah_read_grh(rdma_attr);
1625 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1626 uverb_attr->flow_label = grh->flow_label;
1627 uverb_attr->sgid_index = grh->sgid_index;
1628 uverb_attr->hop_limit = grh->hop_limit;
1629 uverb_attr->traffic_class = grh->traffic_class;
1630 }
1631 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
1632 }
1633
1634 static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
1635 {
1636 struct ib_uverbs_query_qp cmd;
1637 struct ib_uverbs_query_qp_resp resp;
1638 struct ib_qp *qp;
1639 struct ib_qp_attr *attr;
1640 struct ib_qp_init_attr *init_attr;
1641 int ret;
1642
1643 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1644 if (ret)
1645 return ret;
1646
1647 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1648 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1649 if (!attr || !init_attr) {
1650 ret = -ENOMEM;
1651 goto out;
1652 }
1653
1654 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1655 if (!qp) {
1656 ret = -EINVAL;
1657 goto out;
1658 }
1659
1660 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1661
1662 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1663 UVERBS_LOOKUP_READ);
1664
1665 if (ret)
1666 goto out;
1667
1668 memset(&resp, 0, sizeof resp);
1669
1670 resp.qp_state = attr->qp_state;
1671 resp.cur_qp_state = attr->cur_qp_state;
1672 resp.path_mtu = attr->path_mtu;
1673 resp.path_mig_state = attr->path_mig_state;
1674 resp.qkey = attr->qkey;
1675 resp.rq_psn = attr->rq_psn;
1676 resp.sq_psn = attr->sq_psn;
1677 resp.dest_qp_num = attr->dest_qp_num;
1678 resp.qp_access_flags = attr->qp_access_flags;
1679 resp.pkey_index = attr->pkey_index;
1680 resp.alt_pkey_index = attr->alt_pkey_index;
1681 resp.sq_draining = attr->sq_draining;
1682 resp.max_rd_atomic = attr->max_rd_atomic;
1683 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1684 resp.min_rnr_timer = attr->min_rnr_timer;
1685 resp.port_num = attr->port_num;
1686 resp.timeout = attr->timeout;
1687 resp.retry_cnt = attr->retry_cnt;
1688 resp.rnr_retry = attr->rnr_retry;
1689 resp.alt_port_num = attr->alt_port_num;
1690 resp.alt_timeout = attr->alt_timeout;
1691
1692 copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1693 copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
1694
1695 resp.max_send_wr = init_attr->cap.max_send_wr;
1696 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1697 resp.max_send_sge = init_attr->cap.max_send_sge;
1698 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1699 resp.max_inline_data = init_attr->cap.max_inline_data;
1700 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1701
1702 ret = uverbs_response(attrs, &resp, sizeof(resp));
1703
1704 out:
1705 kfree(attr);
1706 kfree(init_attr);
1707
1708 return ret;
1709 }
1710
1711
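/*
 * XRC QPs split responder and initiator state between the TGT and INI QPs,
 * so drop the attribute bits that do not apply to each side.
 */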
1712 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1713 {
1714 switch (qp_type) {
1715 case IB_QPT_XRC_INI:
1716 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1717 case IB_QPT_XRC_TGT:
1718 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1719 IB_QP_RNR_RETRY);
1720 default:
1721 return mask;
1722 }
1723 }
1724
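/*
 * Build an rdma_ah_attr from the uverbs wire format; the GRH is filled in
 * only when user space marked the destination as global.
 */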
1725 static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1726 struct rdma_ah_attr *rdma_attr,
1727 struct ib_uverbs_qp_dest *uverb_attr)
1728 {
1729 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1730 if (uverb_attr->is_global) {
1731 rdma_ah_set_grh(rdma_attr, NULL,
1732 uverb_attr->flow_label,
1733 uverb_attr->sgid_index,
1734 uverb_attr->hop_limit,
1735 uverb_attr->traffic_class);
1736 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1737 } else {
1738 rdma_ah_set_ah_flags(rdma_attr, 0);
1739 }
1740 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1741 rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1742 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1743 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1744 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1745 rdma_ah_set_make_grd(rdma_attr, false);
1746 }
1747
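/*
 * Common implementation of the plain and extended modify_qp commands:
 * validate the port/AV combination requested by user space, translate the
 * command into an ib_qp_attr and pass it to ib_modify_qp_with_udata().
 */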
1748 static int modify_qp(struct uverbs_attr_bundle *attrs,
1749 struct ib_uverbs_ex_modify_qp *cmd)
1750 {
1751 struct ib_qp_attr *attr;
1752 struct ib_qp *qp;
1753 int ret;
1754
1755 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1756 if (!attr)
1757 return -ENOMEM;
1758
1759 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
1760 attrs);
1761 if (!qp) {
1762 ret = -EINVAL;
1763 goto out;
1764 }
1765
1766 if ((cmd->base.attr_mask & IB_QP_PORT) &&
1767 !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1768 ret = -EINVAL;
1769 goto release_qp;
1770 }
1771
1772 if ((cmd->base.attr_mask & IB_QP_AV)) {
1773 if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
1774 ret = -EINVAL;
1775 goto release_qp;
1776 }
1777
1778 if (cmd->base.attr_mask & IB_QP_STATE &&
1779 cmd->base.qp_state == IB_QPS_RTR) {
			/*
			 * We are in the INIT->RTR transition (if we are not,
			 * this transition will be rejected in subsequent
			 * checks). In the INIT->RTR transition we cannot have
			 * IB_QP_PORT set, but the IB_QP_STATE flag is required.
			 *
			 * When IB_QP_AV is set, a valid port number must be
			 * supplied in the primary AV (AVs are created and
			 * handled differently for InfiniBand and Ethernet
			 * (RoCE) ports).
			 *
			 * Check the port number included in the primary AV
			 * against the port number saved in the QP during the
			 * RST->INIT transition.
			 */
1794 if (cmd->base.dest.port_num != qp->real_qp->port) {
1795 ret = -EINVAL;
1796 goto release_qp;
1797 }
1798 } else {
			/*
			 * We are in SQD->SQD (if we are not, this transition
			 * will be rejected by the later verbs-layer checks).
			 * IB_QP_PORT and IB_QP_AV may be set together; when
			 * both are present the port in the command must match
			 * the port carried in the primary AV. If only IB_QP_AV
			 * is set, propagate the AV's port number into the
			 * command and add IB_QP_PORT to the mask so the
			 * subsequent checks see a consistent port.
			 */
1812 if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1813 == (IB_QP_AV | IB_QP_PORT)) &&
1814 cmd->base.port_num != cmd->base.dest.port_num) {
1815 ret = -EINVAL;
1816 goto release_qp;
1817 }
1818 if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1819 == IB_QP_AV) {
1820 cmd->base.attr_mask |= IB_QP_PORT;
1821 cmd->base.port_num = cmd->base.dest.port_num;
1822 }
1823 }
1824 }
1825
1826 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1827 (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1828 !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
1829 cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
1830 ret = -EINVAL;
1831 goto release_qp;
1832 }
1833
1834 if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
1835 cmd->base.cur_qp_state > IB_QPS_ERR) ||
1836 (cmd->base.attr_mask & IB_QP_STATE &&
1837 cmd->base.qp_state > IB_QPS_ERR)) {
1838 ret = -EINVAL;
1839 goto release_qp;
1840 }
1841
1842 if (cmd->base.attr_mask & IB_QP_STATE)
1843 attr->qp_state = cmd->base.qp_state;
1844 if (cmd->base.attr_mask & IB_QP_CUR_STATE)
1845 attr->cur_qp_state = cmd->base.cur_qp_state;
1846 if (cmd->base.attr_mask & IB_QP_PATH_MTU)
1847 attr->path_mtu = cmd->base.path_mtu;
1848 if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
1849 attr->path_mig_state = cmd->base.path_mig_state;
1850 if (cmd->base.attr_mask & IB_QP_QKEY)
1851 attr->qkey = cmd->base.qkey;
1852 if (cmd->base.attr_mask & IB_QP_RQ_PSN)
1853 attr->rq_psn = cmd->base.rq_psn;
1854 if (cmd->base.attr_mask & IB_QP_SQ_PSN)
1855 attr->sq_psn = cmd->base.sq_psn;
1856 if (cmd->base.attr_mask & IB_QP_DEST_QPN)
1857 attr->dest_qp_num = cmd->base.dest_qp_num;
1858 if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
1859 attr->qp_access_flags = cmd->base.qp_access_flags;
1860 if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
1861 attr->pkey_index = cmd->base.pkey_index;
1862 if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1863 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1864 if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1865 attr->max_rd_atomic = cmd->base.max_rd_atomic;
1866 if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1867 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1868 if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
1869 attr->min_rnr_timer = cmd->base.min_rnr_timer;
1870 if (cmd->base.attr_mask & IB_QP_PORT)
1871 attr->port_num = cmd->base.port_num;
1872 if (cmd->base.attr_mask & IB_QP_TIMEOUT)
1873 attr->timeout = cmd->base.timeout;
1874 if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
1875 attr->retry_cnt = cmd->base.retry_cnt;
1876 if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
1877 attr->rnr_retry = cmd->base.rnr_retry;
1878 if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
1879 attr->alt_port_num = cmd->base.alt_port_num;
1880 attr->alt_timeout = cmd->base.alt_timeout;
1881 attr->alt_pkey_index = cmd->base.alt_pkey_index;
1882 }
1883 if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
1884 attr->rate_limit = cmd->rate_limit;
1885
1886 if (cmd->base.attr_mask & IB_QP_AV)
1887 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
1888 &cmd->base.dest);
1889
1890 if (cmd->base.attr_mask & IB_QP_ALT_PATH)
1891 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
1892 &cmd->base.alt_dest);
1893
1894 ret = ib_modify_qp_with_udata(qp, attr,
1895 modify_qp_mask(qp->qp_type,
1896 cmd->base.attr_mask),
1897 &attrs->driver_udata);
1898
1899 release_qp:
1900 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1901 UVERBS_LOOKUP_READ);
1902 out:
1903 kfree(attr);
1904
1905 return ret;
1906 }
1907
1908 static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
1909 {
1910 struct ib_uverbs_ex_modify_qp cmd;
1911 int ret;
1912
1913 ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
1914 if (ret)
1915 return ret;
1916
1917 if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1918 return -EOPNOTSUPP;
1919
1920 return modify_qp(attrs, &cmd);
1921 }
1922
1923 static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
1924 {
1925 struct ib_uverbs_ex_modify_qp cmd;
1926 struct ib_uverbs_ex_modify_qp_resp resp = {
1927 .response_length = uverbs_response_length(attrs, sizeof(resp))
1928 };
1929 int ret;
1930
1931 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1932 if (ret)
1933 return ret;
1934
	/*
	 * Only the standard QP attribute bits plus IB_QP_RATE_LIMIT are
	 * understood by the extended modify_qp command.
	 */
1939 if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
1940 return -EOPNOTSUPP;
1941
1942 ret = modify_qp(attrs, &cmd);
1943 if (ret)
1944 return ret;
1945
1946 return uverbs_response(attrs, &resp, sizeof(resp));
1947 }
1948
1949 static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
1950 {
1951 struct ib_uverbs_destroy_qp cmd;
1952 struct ib_uverbs_destroy_qp_resp resp;
1953 struct ib_uobject *uobj;
1954 struct ib_uqp_object *obj;
1955 int ret;
1956
1957 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1958 if (ret)
1959 return ret;
1960
1961 uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1962 if (IS_ERR(uobj))
1963 return PTR_ERR(uobj);
1964
1965 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
1966 memset(&resp, 0, sizeof(resp));
1967 resp.events_reported = obj->uevent.events_reported;
1968
1969 uobj_put_destroy(uobj);
1970
1971 return uverbs_response(attrs, &resp, sizeof(resp));
1972 }
1973
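/*
 * Allocate a work request together with its trailing scatter/gather array,
 * rejecting sizes that would overflow the allocation calculation.
 */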
1974 static void *alloc_wr(size_t wr_size, __u32 num_sge)
1975 {
1976 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
1977 sizeof(struct ib_sge))
1978 return NULL;
1979
1980 return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
1981 num_sge * sizeof(struct ib_sge),
1982 GFP_KERNEL);
1983 }
1984
1985 static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
1986 {
1987 struct ib_uverbs_post_send cmd;
1988 struct ib_uverbs_post_send_resp resp;
1989 struct ib_uverbs_send_wr *user_wr;
1990 struct ib_send_wr *wr = NULL, *last, *next;
1991 const struct ib_send_wr *bad_wr;
1992 struct ib_qp *qp;
1993 int i, sg_ind;
1994 int is_ud;
1995 int ret, ret2;
1996 size_t next_size;
1997 const struct ib_sge __user *sgls;
1998 const void __user *wqes;
1999 struct uverbs_req_iter iter;
2000
2001 ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2002 if (ret)
2003 return ret;
2004 wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
2005 if (IS_ERR(wqes))
2006 return PTR_ERR(wqes);
2007 sgls = uverbs_request_next_ptr(
2008 &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
2009 if (IS_ERR(sgls))
2010 return PTR_ERR(sgls);
2011 ret = uverbs_request_finish(&iter);
2012 if (ret)
2013 return ret;
2014
2015 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2016 if (!user_wr)
2017 return -ENOMEM;
2018
2019 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2020 if (!qp) {
2021 ret = -EINVAL;
2022 goto out;
2023 }
2024
2025 is_ud = qp->qp_type == IB_QPT_UD;
2026 sg_ind = 0;
2027 last = NULL;
2028 for (i = 0; i < cmd.wr_count; ++i) {
2029 if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
2030 cmd.wqe_size)) {
2031 ret = -EFAULT;
2032 goto out_put;
2033 }
2034
2035 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2036 ret = -EINVAL;
2037 goto out_put;
2038 }
2039
2040 if (is_ud) {
2041 struct ib_ud_wr *ud;
2042
2043 if (user_wr->opcode != IB_WR_SEND &&
2044 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2045 ret = -EINVAL;
2046 goto out_put;
2047 }
2048
2049 next_size = sizeof(*ud);
2050 ud = alloc_wr(next_size, user_wr->num_sge);
2051 if (!ud) {
2052 ret = -ENOMEM;
2053 goto out_put;
2054 }
2055
2056 ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
2057 user_wr->wr.ud.ah, attrs);
2058 if (!ud->ah) {
2059 kfree(ud);
2060 ret = -EINVAL;
2061 goto out_put;
2062 }
2063 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2064 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2065
2066 next = &ud->wr;
2067 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2068 user_wr->opcode == IB_WR_RDMA_WRITE ||
2069 user_wr->opcode == IB_WR_RDMA_READ) {
2070 struct ib_rdma_wr *rdma;
2071
2072 next_size = sizeof(*rdma);
2073 rdma = alloc_wr(next_size, user_wr->num_sge);
2074 if (!rdma) {
2075 ret = -ENOMEM;
2076 goto out_put;
2077 }
2078
2079 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2080 rdma->rkey = user_wr->wr.rdma.rkey;
2081
2082 next = &rdma->wr;
2083 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2084 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2085 struct ib_atomic_wr *atomic;
2086
2087 next_size = sizeof(*atomic);
2088 atomic = alloc_wr(next_size, user_wr->num_sge);
2089 if (!atomic) {
2090 ret = -ENOMEM;
2091 goto out_put;
2092 }
2093
2094 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2095 atomic->compare_add = user_wr->wr.atomic.compare_add;
2096 atomic->swap = user_wr->wr.atomic.swap;
2097 atomic->rkey = user_wr->wr.atomic.rkey;
2098
2099 next = &atomic->wr;
2100 } else if (user_wr->opcode == IB_WR_SEND ||
2101 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2102 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2103 next_size = sizeof(*next);
2104 next = alloc_wr(next_size, user_wr->num_sge);
2105 if (!next) {
2106 ret = -ENOMEM;
2107 goto out_put;
2108 }
2109 } else {
2110 ret = -EINVAL;
2111 goto out_put;
2112 }
2113
2114 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2115 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2116 next->ex.imm_data =
2117 (__be32 __force) user_wr->ex.imm_data;
2118 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2119 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2120 }
2121
2122 if (!last)
2123 wr = next;
2124 else
2125 last->next = next;
2126 last = next;
2127
2128 next->next = NULL;
2129 next->wr_id = user_wr->wr_id;
2130 next->num_sge = user_wr->num_sge;
2131 next->opcode = user_wr->opcode;
2132 next->send_flags = user_wr->send_flags;
2133
2134 if (next->num_sge) {
2135 next->sg_list = (void *) next +
2136 ALIGN(next_size, sizeof(struct ib_sge));
2137 if (copy_from_user(next->sg_list, sgls + sg_ind,
2138 next->num_sge *
2139 sizeof(struct ib_sge))) {
2140 ret = -EFAULT;
2141 goto out_put;
2142 }
2143 sg_ind += next->num_sge;
2144 } else
2145 next->sg_list = NULL;
2146 }
2147
2148 resp.bad_wr = 0;
2149 ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
2150 if (ret)
2151 for (next = wr; next; next = next->next) {
2152 ++resp.bad_wr;
2153 if (next == bad_wr)
2154 break;
2155 }
2156
2157 ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2158 if (ret2)
2159 ret = ret2;
2160
2161 out_put:
2162 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2163 UVERBS_LOOKUP_READ);
2164
2165 while (wr) {
2166 if (is_ud && ud_wr(wr)->ah)
2167 uobj_put_obj_read(ud_wr(wr)->ah);
2168 next = wr->next;
2169 kfree(wr);
2170 wr = next;
2171 }
2172
2173 out:
2174 kfree(user_wr);
2175
2176 return ret;
2177 }
2178
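/*
 * Pull wr_count receive WRs (each wqe_size bytes) and sge_count scatter/
 * gather entries out of the request iterator and rebuild them as a chain of
 * kernel struct ib_recv_wr. On success the caller owns the chain and must
 * kfree() every entry; on failure an ERR_PTR is returned.
 */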
2179 static struct ib_recv_wr *
2180 ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
2181 u32 wqe_size, u32 sge_count)
2182 {
2183 struct ib_uverbs_recv_wr *user_wr;
2184 struct ib_recv_wr *wr = NULL, *last, *next;
2185 int sg_ind;
2186 int i;
2187 int ret;
2188 const struct ib_sge __user *sgls;
2189 const void __user *wqes;
2190
2191 if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
2192 return ERR_PTR(-EINVAL);
2193
2194 wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
2195 if (IS_ERR(wqes))
2196 return ERR_CAST(wqes);
2197 sgls = uverbs_request_next_ptr(
2198 iter, sge_count * sizeof(struct ib_uverbs_sge));
2199 if (IS_ERR(sgls))
2200 return ERR_CAST(sgls);
2201 ret = uverbs_request_finish(iter);
2202 if (ret)
2203 return ERR_PTR(ret);
2204
2205 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2206 if (!user_wr)
2207 return ERR_PTR(-ENOMEM);
2208
2209 sg_ind = 0;
2210 last = NULL;
2211 for (i = 0; i < wr_count; ++i) {
2212 if (copy_from_user(user_wr, wqes + i * wqe_size,
2213 wqe_size)) {
2214 ret = -EFAULT;
2215 goto err;
2216 }
2217
2218 if (user_wr->num_sge + sg_ind > sge_count) {
2219 ret = -EINVAL;
2220 goto err;
2221 }
2222
2223 if (user_wr->num_sge >=
2224 (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
2225 sizeof(struct ib_sge)) {
2226 ret = -EINVAL;
2227 goto err;
2228 }
2229
2230 next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
2231 user_wr->num_sge * sizeof(struct ib_sge),
2232 GFP_KERNEL);
2233 if (!next) {
2234 ret = -ENOMEM;
2235 goto err;
2236 }
2237
2238 if (!last)
2239 wr = next;
2240 else
2241 last->next = next;
2242 last = next;
2243
2244 next->next = NULL;
2245 next->wr_id = user_wr->wr_id;
2246 next->num_sge = user_wr->num_sge;
2247
2248 if (next->num_sge) {
2249 next->sg_list = (void *)next +
2250 ALIGN(sizeof(*next), sizeof(struct ib_sge));
2251 if (copy_from_user(next->sg_list, sgls + sg_ind,
2252 next->num_sge *
2253 sizeof(struct ib_sge))) {
2254 ret = -EFAULT;
2255 goto err;
2256 }
2257 sg_ind += next->num_sge;
2258 } else
2259 next->sg_list = NULL;
2260 }
2261
2262 kfree(user_wr);
2263 return wr;
2264
2265 err:
2266 kfree(user_wr);
2267
2268 while (wr) {
2269 next = wr->next;
2270 kfree(wr);
2271 wr = next;
2272 }
2273
2274 return ERR_PTR(ret);
2275 }
2276
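/*
 * POST_RECV: unmarshal the user's receive work requests and pass them to the
 * driver's post_recv verb. On failure resp.bad_wr is the 1-based index of
 * the WR the driver rejected.
 */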
2277 static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
2278 {
2279 struct ib_uverbs_post_recv cmd;
2280 struct ib_uverbs_post_recv_resp resp;
2281 struct ib_recv_wr *wr, *next;
2282 const struct ib_recv_wr *bad_wr;
2283 struct ib_qp *qp;
2284 int ret, ret2;
2285 struct uverbs_req_iter iter;
2286
2287 ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2288 if (ret)
2289 return ret;
2290
2291 wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2292 cmd.sge_count);
2293 if (IS_ERR(wr))
2294 return PTR_ERR(wr);
2295
2296 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2297 if (!qp) {
2298 ret = -EINVAL;
2299 goto out;
2300 }
2301
2302 resp.bad_wr = 0;
2303 ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
2304
2305 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2306 UVERBS_LOOKUP_READ);
2307 if (ret) {
2308 for (next = wr; next; next = next->next) {
2309 ++resp.bad_wr;
2310 if (next == bad_wr)
2311 break;
2312 }
2313 }
2314
2315 ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2316 if (ret2)
2317 ret = ret2;
2318 out:
2319 while (wr) {
2320 next = wr->next;
2321 kfree(wr);
2322 wr = next;
2323 }
2324
2325 return ret;
2326 }
2327
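/*
 * POST_SRQ_RECV: same as POST_RECV, but the work requests are posted to a
 * shared receive queue instead of a QP's receive queue.
 */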
2328 static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
2329 {
2330 struct ib_uverbs_post_srq_recv cmd;
2331 struct ib_uverbs_post_srq_recv_resp resp;
2332 struct ib_recv_wr *wr, *next;
2333 const struct ib_recv_wr *bad_wr;
2334 struct ib_srq *srq;
2335 int ret, ret2;
2336 struct uverbs_req_iter iter;
2337
2338 ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2339 if (ret)
2340 return ret;
2341
2342 wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2343 cmd.sge_count);
2344 if (IS_ERR(wr))
2345 return PTR_ERR(wr);
2346
2347 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
2348 if (!srq) {
2349 ret = -EINVAL;
2350 goto out;
2351 }
2352
2353 resp.bad_wr = 0;
2354 ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
2355
2356 rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
2357 UVERBS_LOOKUP_READ);
2358
2359 if (ret)
2360 for (next = wr; next; next = next->next) {
2361 ++resp.bad_wr;
2362 if (next == bad_wr)
2363 break;
2364 }
2365
2366 ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2367 if (ret2)
2368 ret = ret2;
2369
2370 out:
2371 while (wr) {
2372 next = wr->next;
2373 kfree(wr);
2374 wr = next;
2375 }
2376
2377 return ret;
2378 }
2379
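/*
 * CREATE_AH: translate the user-supplied address attributes into a struct
 * rdma_ah_attr and create an address handle on the given PD.
 */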
2380 static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
2381 {
2382 struct ib_uverbs_create_ah cmd;
2383 struct ib_uverbs_create_ah_resp resp;
2384 struct ib_uobject *uobj;
2385 struct ib_pd *pd;
2386 struct ib_ah *ah;
2387 struct rdma_ah_attr attr = {};
2388 int ret;
2389 struct ib_device *ib_dev;
2390
2391 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2392 if (ret)
2393 return ret;
2394
2395 uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
2396 if (IS_ERR(uobj))
2397 return PTR_ERR(uobj);
2398
2399 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
2400 ret = -EINVAL;
2401 goto err;
2402 }
2403
2404 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2405 if (!pd) {
2406 ret = -EINVAL;
2407 goto err;
2408 }
2409
2410 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
2411 rdma_ah_set_make_grd(&attr, false);
2412 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2413 rdma_ah_set_sl(&attr, cmd.attr.sl);
2414 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2415 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2416 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2417
2418 if (cmd.attr.is_global) {
2419 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2420 cmd.attr.grh.sgid_index,
2421 cmd.attr.grh.hop_limit,
2422 cmd.attr.grh.traffic_class);
2423 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
2424 } else {
2425 rdma_ah_set_ah_flags(&attr, 0);
2426 }
2427
2428 ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
2429 if (IS_ERR(ah)) {
2430 ret = PTR_ERR(ah);
2431 goto err_put;
2432 }
2433
2434 ah->uobject = uobj;
2435 uobj->user_handle = cmd.user_handle;
2436 uobj->object = ah;
2437 uobj_put_obj_read(pd);
2438 uobj_finalize_uobj_create(uobj, attrs);
2439
2440 resp.ah_handle = uobj->id;
2441 return uverbs_response(attrs, &resp, sizeof(resp));
2442
2443 err_put:
2444 uobj_put_obj_read(pd);
2445 err:
2446 uobj_alloc_abort(uobj, attrs);
2447 return ret;
2448 }
2449
2450 static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
2451 {
2452 struct ib_uverbs_destroy_ah cmd;
2453 int ret;
2454
2455 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2456 if (ret)
2457 return ret;
2458
2459 return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
2460 }
2461
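/*
 * ATTACH_MCAST: attach the QP to a multicast group. Each (gid, lid) pair is
 * remembered on the QP uobject's mcast_list so it can be detached again when
 * the QP is destroyed; attaching the same group twice is treated as success.
 */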
2462 static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
2463 {
2464 struct ib_uverbs_attach_mcast cmd;
2465 struct ib_qp *qp;
2466 struct ib_uqp_object *obj;
2467 struct ib_uverbs_mcast_entry *mcast;
2468 int ret;
2469
2470 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2471 if (ret)
2472 return ret;
2473
2474 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2475 if (!qp)
2476 return -EINVAL;
2477
2478 obj = qp->uobject;
2479
2480 mutex_lock(&obj->mcast_lock);
2481 list_for_each_entry(mcast, &obj->mcast_list, list)
2482 if (cmd.mlid == mcast->lid &&
2483 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2484 ret = 0;
2485 goto out_put;
2486 }
2487
2488 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2489 if (!mcast) {
2490 ret = -ENOMEM;
2491 goto out_put;
2492 }
2493
2494 mcast->lid = cmd.mlid;
2495 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2496
2497 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2498 if (!ret)
2499 list_add_tail(&mcast->list, &obj->mcast_list);
2500 else
2501 kfree(mcast);
2502
2503 out_put:
2504 mutex_unlock(&obj->mcast_lock);
2505 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2506 UVERBS_LOOKUP_READ);
2507
2508 return ret;
2509 }
2510
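/*
 * DETACH_MCAST: drop the (gid, lid) entry from the uobject's list and detach
 * the QP from the multicast group; an entry that was never attached through
 * this interface returns -EINVAL.
 */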
2511 static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
2512 {
2513 struct ib_uverbs_detach_mcast cmd;
2514 struct ib_uqp_object *obj;
2515 struct ib_qp *qp;
2516 struct ib_uverbs_mcast_entry *mcast;
2517 int ret;
2518 bool found = false;
2519
2520 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2521 if (ret)
2522 return ret;
2523
2524 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2525 if (!qp)
2526 return -EINVAL;
2527
2528 obj = qp->uobject;
2529 mutex_lock(&obj->mcast_lock);
2530
2531 list_for_each_entry(mcast, &obj->mcast_list, list)
2532 if (cmd.mlid == mcast->lid &&
2533 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2534 list_del(&mcast->list);
2535 kfree(mcast);
2536 found = true;
2537 break;
2538 }
2539
2540 if (!found) {
2541 ret = -EINVAL;
2542 goto out_put;
2543 }
2544
2545 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2546
2547 out_put:
2548 mutex_unlock(&obj->mcast_lock);
2549 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2550 UVERBS_LOOKUP_READ);
2551 return ret;
2552 }
2553
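/*
 * Allocate the bookkeeping structure that records which counters and
 * flow_action objects a flow's specs reference, sized for up to num_specs
 * entries of each kind.
 */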
2554 struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
2555 {
2556 struct ib_uflow_resources *resources;
2557
2558 resources = kzalloc(sizeof(*resources), GFP_KERNEL);
2559
2560 if (!resources)
2561 return NULL;
2562
2563 if (!num_specs)
2564 goto out;
2565
2566 resources->counters =
2567 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
2568 resources->collection =
2569 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2570
2571 if (!resources->counters || !resources->collection)
2572 goto err;
2573
2574 out:
2575 resources->max = num_specs;
2576 return resources;
2577
2578 err:
	kfree(resources->counters);
	kfree(resources->collection);
	kfree(resources);
2581
2582 return NULL;
2583 }
2584 EXPORT_SYMBOL(flow_resources_alloc);
2585
2586 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
2587 {
2588 unsigned int i;
2589
2590 if (!uflow_res)
2591 return;
2592
2593 for (i = 0; i < uflow_res->collection_num; i++)
2594 atomic_dec(&uflow_res->collection[i]->usecnt);
2595
2596 for (i = 0; i < uflow_res->counters_num; i++)
2597 atomic_dec(&uflow_res->counters[i]->usecnt);
2598
2599 kfree(uflow_res->collection);
2600 kfree(uflow_res->counters);
2601 kfree(uflow_res);
2602 }
2603 EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
2604
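/*
 * Record a flow_action or counters object referenced by a flow spec so that
 * ib_uverbs_flow_resources_free() can drop its usecnt again when the flow is
 * destroyed.
 */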
2605 void flow_resources_add(struct ib_uflow_resources *uflow_res,
2606 enum ib_flow_spec_type type,
2607 void *ibobj)
2608 {
2609 WARN_ON(uflow_res->num >= uflow_res->max);
2610
2611 switch (type) {
2612 case IB_FLOW_SPEC_ACTION_HANDLE:
2613 atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
2614 uflow_res->collection[uflow_res->collection_num++] =
2615 (struct ib_flow_action *)ibobj;
2616 break;
2617 case IB_FLOW_SPEC_ACTION_COUNT:
2618 atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
2619 uflow_res->counters[uflow_res->counters_num++] =
2620 (struct ib_counters *)ibobj;
2621 break;
2622 default:
2623 WARN_ON(1);
2624 }
2625
2626 uflow_res->num++;
2627 }
2628 EXPORT_SYMBOL(flow_resources_add);
2629
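/*
 * Convert an action-type flow spec (tag, drop, handle, count) from its uAPI
 * layout into the kernel's union ib_flow_spec, taking a reference on any
 * flow_action/counters object involved and recording it in uflow_res.
 */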
2630 static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
2631 struct ib_uverbs_flow_spec *kern_spec,
2632 union ib_flow_spec *ib_spec,
2633 struct ib_uflow_resources *uflow_res)
2634 {
2635 ib_spec->type = kern_spec->type;
2636 switch (ib_spec->type) {
2637 case IB_FLOW_SPEC_ACTION_TAG:
2638 if (kern_spec->flow_tag.size !=
2639 sizeof(struct ib_uverbs_flow_spec_action_tag))
2640 return -EINVAL;
2641
2642 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2643 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2644 break;
2645 case IB_FLOW_SPEC_ACTION_DROP:
2646 if (kern_spec->drop.size !=
2647 sizeof(struct ib_uverbs_flow_spec_action_drop))
2648 return -EINVAL;
2649
2650 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2651 break;
2652 case IB_FLOW_SPEC_ACTION_HANDLE:
2653 if (kern_spec->action.size !=
2654 sizeof(struct ib_uverbs_flow_spec_action_handle))
2655 return -EOPNOTSUPP;
2656 ib_spec->action.act = uobj_get_obj_read(flow_action,
2657 UVERBS_OBJECT_FLOW_ACTION,
2658 kern_spec->action.handle,
2659 attrs);
2660 if (!ib_spec->action.act)
2661 return -EINVAL;
2662 ib_spec->action.size =
2663 sizeof(struct ib_flow_spec_action_handle);
2664 flow_resources_add(uflow_res,
2665 IB_FLOW_SPEC_ACTION_HANDLE,
2666 ib_spec->action.act);
2667 uobj_put_obj_read(ib_spec->action.act);
2668 break;
2669 case IB_FLOW_SPEC_ACTION_COUNT:
2670 if (kern_spec->flow_count.size !=
2671 sizeof(struct ib_uverbs_flow_spec_action_count))
2672 return -EINVAL;
2673 ib_spec->flow_count.counters =
2674 uobj_get_obj_read(counters,
2675 UVERBS_OBJECT_COUNTERS,
2676 kern_spec->flow_count.handle,
2677 attrs);
2678 if (!ib_spec->flow_count.counters)
2679 return -EINVAL;
2680 ib_spec->flow_count.size =
2681 sizeof(struct ib_flow_spec_action_count);
2682 flow_resources_add(uflow_res,
2683 IB_FLOW_SPEC_ACTION_COUNT,
2684 ib_spec->flow_count.counters);
2685 uobj_put_obj_read(ib_spec->flow_count.counters);
2686 break;
2687 default:
2688 return -EINVAL;
2689 }
2690 return 0;
2691 }
2692
2693 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
2694 u16 ib_real_filter_sz)
2695 {
	/*
	 * If user space passed a filter larger than the kernel's current
	 * definition, the extra trailing bytes must all be zero; otherwise
	 * we would silently ignore attributes we do not understand. The
	 * size used for copying is capped at the kernel's filter size.
	 */
2701 if (kern_filter_size > ib_real_filter_sz) {
2702 if (memchr_inv(kern_spec_filter +
2703 ib_real_filter_sz, 0,
2704 kern_filter_size - ib_real_filter_sz))
2705 return -EINVAL;
2706 return ib_real_filter_sz;
2707 }
2708 return kern_filter_size;
2709 }
2710
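/*
 * Convert a filter-type flow spec. The user's val/mask buffers may be larger
 * or smaller than the kernel's filter layout; spec_filter_size() above
 * decides how many bytes are actually copied into the ib_spec.
 */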
2711 int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
2712 const void *kern_spec_mask,
2713 const void *kern_spec_val,
2714 size_t kern_filter_sz,
2715 union ib_flow_spec *ib_spec)
2716 {
2717 ssize_t actual_filter_sz;
2718 ssize_t ib_filter_sz;
2719

	/* The user-supplied filter size must be a multiple of 4 bytes */
2721 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2722 return -EINVAL;
2723
2724 ib_spec->type = type;
2725
2726 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2727 return -EINVAL;
2728
2729 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2730 case IB_FLOW_SPEC_ETH:
2731 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2732 actual_filter_sz = spec_filter_size(kern_spec_mask,
2733 kern_filter_sz,
2734 ib_filter_sz);
2735 if (actual_filter_sz <= 0)
2736 return -EINVAL;
2737 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2738 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2739 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2740 break;
2741 case IB_FLOW_SPEC_IPV4:
2742 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2743 actual_filter_sz = spec_filter_size(kern_spec_mask,
2744 kern_filter_sz,
2745 ib_filter_sz);
2746 if (actual_filter_sz <= 0)
2747 return -EINVAL;
2748 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2749 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2750 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2751 break;
2752 case IB_FLOW_SPEC_IPV6:
2753 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2754 actual_filter_sz = spec_filter_size(kern_spec_mask,
2755 kern_filter_sz,
2756 ib_filter_sz);
2757 if (actual_filter_sz <= 0)
2758 return -EINVAL;
2759 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2760 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2761 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2762
2763 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2764 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2765 return -EINVAL;
2766 break;
2767 case IB_FLOW_SPEC_TCP:
2768 case IB_FLOW_SPEC_UDP:
2769 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2770 actual_filter_sz = spec_filter_size(kern_spec_mask,
2771 kern_filter_sz,
2772 ib_filter_sz);
2773 if (actual_filter_sz <= 0)
2774 return -EINVAL;
2775 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2776 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2777 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2778 break;
2779 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2780 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2781 actual_filter_sz = spec_filter_size(kern_spec_mask,
2782 kern_filter_sz,
2783 ib_filter_sz);
2784 if (actual_filter_sz <= 0)
2785 return -EINVAL;
2786 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2787 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2788 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2789
2790 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2791 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2792 return -EINVAL;
2793 break;
2794 case IB_FLOW_SPEC_ESP:
2795 ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
2796 actual_filter_sz = spec_filter_size(kern_spec_mask,
2797 kern_filter_sz,
2798 ib_filter_sz);
2799 if (actual_filter_sz <= 0)
2800 return -EINVAL;
2801 ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
2802 memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
2803 memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
2804 break;
2805 case IB_FLOW_SPEC_GRE:
2806 ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
2807 actual_filter_sz = spec_filter_size(kern_spec_mask,
2808 kern_filter_sz,
2809 ib_filter_sz);
2810 if (actual_filter_sz <= 0)
2811 return -EINVAL;
2812 ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
2813 memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
2814 memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
2815 break;
2816 case IB_FLOW_SPEC_MPLS:
2817 ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
2818 actual_filter_sz = spec_filter_size(kern_spec_mask,
2819 kern_filter_sz,
2820 ib_filter_sz);
2821 if (actual_filter_sz <= 0)
2822 return -EINVAL;
2823 ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
2824 memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
2825 memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
2826 break;
2827 default:
2828 return -EINVAL;
2829 }
2830 return 0;
2831 }
2832
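/*
 * Filter specs arriving through the write() path carry the value and mask
 * back to back after the header; split them and convert with the common
 * helper above.
 */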
2833 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2834 union ib_flow_spec *ib_spec)
2835 {
2836 size_t kern_filter_sz;
2837 void *kern_spec_mask;
2838 void *kern_spec_val;
2839
2840 if (check_sub_overflow((size_t)kern_spec->hdr.size,
2841 sizeof(struct ib_uverbs_flow_spec_hdr),
2842 &kern_filter_sz))
2843 return -EINVAL;
2844
2845 kern_filter_sz /= 2;
2846
2847 kern_spec_val = (void *)kern_spec +
2848 sizeof(struct ib_uverbs_flow_spec_hdr);
2849 kern_spec_mask = kern_spec_val + kern_filter_sz;
2850
2851 return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
2852 kern_spec_mask,
2853 kern_spec_val,
2854 kern_filter_sz, ib_spec);
2855 }
2856
2857 static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
2858 struct ib_uverbs_flow_spec *kern_spec,
2859 union ib_flow_spec *ib_spec,
2860 struct ib_uflow_resources *uflow_res)
2861 {
2862 if (kern_spec->reserved)
2863 return -EINVAL;
2864
2865 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2866 return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
2867 uflow_res);
2868 else
2869 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2870 }
2871
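/*
 * EX_CREATE_WQ: create a receive work queue on the given PD and CQ and hook
 * it up to the process's default async event file, if one exists, so WQ
 * events can be delivered.
 */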
2872 static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
2873 {
2874 struct ib_uverbs_ex_create_wq cmd;
2875 struct ib_uverbs_ex_create_wq_resp resp = {};
2876 struct ib_uwq_object *obj;
2877 int err = 0;
2878 struct ib_cq *cq;
2879 struct ib_pd *pd;
2880 struct ib_wq *wq;
2881 struct ib_wq_init_attr wq_init_attr = {};
2882 struct ib_device *ib_dev;
2883
2884 err = uverbs_request(attrs, &cmd, sizeof(cmd));
2885 if (err)
2886 return err;
2887
2888 if (cmd.comp_mask)
2889 return -EOPNOTSUPP;
2890
2891 obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
2892 &ib_dev);
2893 if (IS_ERR(obj))
2894 return PTR_ERR(obj);
2895
2896 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2897 if (!pd) {
2898 err = -EINVAL;
2899 goto err_uobj;
2900 }
2901
2902 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
2903 if (!cq) {
2904 err = -EINVAL;
2905 goto err_put_pd;
2906 }
2907
2908 wq_init_attr.cq = cq;
2909 wq_init_attr.max_sge = cmd.max_sge;
2910 wq_init_attr.max_wr = cmd.max_wr;
2911 wq_init_attr.wq_type = cmd.wq_type;
2912 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2913 wq_init_attr.create_flags = cmd.create_flags;
2914 INIT_LIST_HEAD(&obj->uevent.event_list);
2915 obj->uevent.uobject.user_handle = cmd.user_handle;
2916
2917 wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
2918 if (IS_ERR(wq)) {
2919 err = PTR_ERR(wq);
2920 goto err_put_cq;
2921 }
2922
2923 wq->uobject = obj;
2924 obj->uevent.uobject.object = wq;
2925 wq->wq_type = wq_init_attr.wq_type;
2926 wq->cq = cq;
2927 wq->pd = pd;
2928 wq->device = pd->device;
2929 atomic_set(&wq->usecnt, 0);
2930 atomic_inc(&pd->usecnt);
2931 atomic_inc(&cq->usecnt);
2932 obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
2933 if (obj->uevent.event_file)
2934 uverbs_uobject_get(&obj->uevent.event_file->uobj);
2935
2936 uobj_put_obj_read(pd);
2937 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2938 UVERBS_LOOKUP_READ);
2939 uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
2940
2941 resp.wq_handle = obj->uevent.uobject.id;
2942 resp.max_sge = wq_init_attr.max_sge;
2943 resp.max_wr = wq_init_attr.max_wr;
2944 resp.wqn = wq->wq_num;
2945 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2946 return uverbs_response(attrs, &resp, sizeof(resp));
2947
2948 err_put_cq:
2949 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2950 UVERBS_LOOKUP_READ);
2951 err_put_pd:
2952 uobj_put_obj_read(pd);
2953 err_uobj:
2954 uobj_alloc_abort(&obj->uevent.uobject, attrs);
2955
2956 return err;
2957 }
2958
2959 static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
2960 {
2961 struct ib_uverbs_ex_destroy_wq cmd;
2962 struct ib_uverbs_ex_destroy_wq_resp resp = {};
2963 struct ib_uobject *uobj;
2964 struct ib_uwq_object *obj;
2965 int ret;
2966
2967 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2968 if (ret)
2969 return ret;
2970
2971 if (cmd.comp_mask)
2972 return -EOPNOTSUPP;
2973
2974 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2975 uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
2976 if (IS_ERR(uobj))
2977 return PTR_ERR(uobj);
2978
2979 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
2980 resp.events_reported = obj->uevent.events_reported;
2981
2982 uobj_put_destroy(uobj);
2983
2984 return uverbs_response(attrs, &resp, sizeof(resp));
2985 }
2986
2987 static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
2988 {
2989 struct ib_uverbs_ex_modify_wq cmd;
2990 struct ib_wq *wq;
2991 struct ib_wq_attr wq_attr = {};
2992 int ret;
2993
2994 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2995 if (ret)
2996 return ret;
2997
2998 if (!cmd.attr_mask)
2999 return -EINVAL;
3000
	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	/*
	 * Validate the requested states before taking a read reference on
	 * the WQ, so that no error path below can return without releasing
	 * it.
	 */
	if ((cmd.attr_mask & IB_WQ_CUR_STATE) && cmd.curr_wq_state > IB_WQS_ERR)
		return -EINVAL;
	if ((cmd.attr_mask & IB_WQ_STATE) && cmd.wq_state > IB_WQS_ERR)
		return -EINVAL;

	wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
	if (!wq)
		return -EINVAL;
3007
3008 if (cmd.attr_mask & IB_WQ_FLAGS) {
3009 wq_attr.flags = cmd.flags;
3010 wq_attr.flags_mask = cmd.flags_mask;
3011 }
3012
	if (cmd.attr_mask & IB_WQ_CUR_STATE)
		wq_attr.curr_wq_state = cmd.curr_wq_state;
	else
		wq_attr.curr_wq_state = wq->state;

	if (cmd.attr_mask & IB_WQ_STATE)
		wq_attr.wq_state = cmd.wq_state;
	else
		wq_attr.wq_state = wq_attr.curr_wq_state;
3030
3031 ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
3032 &attrs->driver_udata);
3033 rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
3034 UVERBS_LOOKUP_READ);
3035 return ret;
3036 }
3037
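/*
 * EX_CREATE_RWQ_IND_TBL: build an RSS indirection table out of
 * 1 << log_ind_tbl_size WQ handles read from the request tail. Every
 * referenced WQ keeps an elevated usecnt for the lifetime of the table.
 */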
3038 static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3039 {
3040 struct ib_uverbs_ex_create_rwq_ind_table cmd;
3041 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3042 struct ib_uobject *uobj;
3043 int err;
3044 struct ib_rwq_ind_table_init_attr init_attr = {};
3045 struct ib_rwq_ind_table *rwq_ind_tbl;
3046 struct ib_wq **wqs = NULL;
3047 u32 *wqs_handles = NULL;
3048 struct ib_wq *wq = NULL;
3049 int i, num_read_wqs;
3050 u32 num_wq_handles;
3051 struct uverbs_req_iter iter;
3052 struct ib_device *ib_dev;
3053
3054 err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3055 if (err)
3056 return err;
3057
3058 if (cmd.comp_mask)
3059 return -EOPNOTSUPP;
3060
3061 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3062 return -EINVAL;
3063
3064 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3065 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3066 GFP_KERNEL);
3067 if (!wqs_handles)
3068 return -ENOMEM;
3069
3070 err = uverbs_request_next(&iter, wqs_handles,
3071 num_wq_handles * sizeof(__u32));
3072 if (err)
3073 goto err_free;
3074
3075 err = uverbs_request_finish(&iter);
3076 if (err)
3077 goto err_free;
3078
3079 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3080 if (!wqs) {
3081 err = -ENOMEM;
3082 goto err_free;
3083 }
3084
3085 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3086 num_read_wqs++) {
3087 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
3088 wqs_handles[num_read_wqs], attrs);
3089 if (!wq) {
3090 err = -EINVAL;
3091 goto put_wqs;
3092 }
3093
3094 wqs[num_read_wqs] = wq;
3095 atomic_inc(&wqs[num_read_wqs]->usecnt);
3096 }
3097
3098 uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
3099 if (IS_ERR(uobj)) {
3100 err = PTR_ERR(uobj);
3101 goto put_wqs;
3102 }
3103
3104 rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
3105 if (!rwq_ind_tbl) {
3106 err = -ENOMEM;
3107 goto err_uobj;
3108 }
3109
3110 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3111 init_attr.ind_tbl = wqs;
3112
3113 rwq_ind_tbl->ind_tbl = wqs;
3114 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3115 rwq_ind_tbl->uobject = uobj;
3116 uobj->object = rwq_ind_tbl;
3117 rwq_ind_tbl->device = ib_dev;
3118 atomic_set(&rwq_ind_tbl->usecnt, 0);
3119
3120 err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
3121 &attrs->driver_udata);
3122 if (err)
3123 goto err_create;
3124
3125 for (i = 0; i < num_wq_handles; i++)
3126 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3127 UVERBS_LOOKUP_READ);
3128 kfree(wqs_handles);
3129 uobj_finalize_uobj_create(uobj, attrs);
3130
3131 resp.ind_tbl_handle = uobj->id;
3132 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3133 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3134 return uverbs_response(attrs, &resp, sizeof(resp));
3135
3136 err_create:
3137 kfree(rwq_ind_tbl);
3138 err_uobj:
3139 uobj_alloc_abort(uobj, attrs);
3140 put_wqs:
3141 for (i = 0; i < num_read_wqs; i++) {
3142 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3143 UVERBS_LOOKUP_READ);
3144 atomic_dec(&wqs[i]->usecnt);
3145 }
3146 err_free:
3147 kfree(wqs_handles);
3148 kfree(wqs);
3149 return err;
3150 }
3151
3152 static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3153 {
3154 struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
3155 int ret;
3156
3157 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3158 if (ret)
3159 return ret;
3160
3161 if (cmd.comp_mask)
3162 return -EOPNOTSUPP;
3163
3164 return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
3165 cmd.ind_tbl_handle, attrs);
3166 }
3167
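/*
 * EX_CREATE_FLOW: parse the variable-length list of flow specs that follows
 * the command, convert each one into the driver-facing ib_flow_attr and
 * attach the resulting flow to the QP. Requires CAP_NET_RAW.
 */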
3168 static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
3169 {
3170 struct ib_uverbs_create_flow cmd;
3171 struct ib_uverbs_create_flow_resp resp = {};
3172 struct ib_uobject *uobj;
3173 struct ib_flow *flow_id;
3174 struct ib_uverbs_flow_attr *kern_flow_attr;
3175 struct ib_flow_attr *flow_attr;
3176 struct ib_qp *qp;
3177 struct ib_uflow_resources *uflow_res;
3178 struct ib_uverbs_flow_spec_hdr *kern_spec;
3179 struct uverbs_req_iter iter;
3180 int err;
3181 void *ib_spec;
3182 int i;
3183 struct ib_device *ib_dev;
3184
3185 err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3186 if (err)
3187 return err;
3188
3189 if (cmd.comp_mask)
3190 return -EINVAL;
3191
3192 if (!capable(CAP_NET_RAW))
3193 return -EPERM;
3194
3195 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3196 return -EINVAL;
3197
3198 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3199 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3200 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3201 return -EINVAL;
3202
3203 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3204 return -EINVAL;
3205
3206 if (cmd.flow_attr.size >
3207 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3208 return -EINVAL;
3209
3210 if (cmd.flow_attr.reserved[0] ||
3211 cmd.flow_attr.reserved[1])
3212 return -EINVAL;
3213
3214 if (cmd.flow_attr.num_of_specs) {
3215 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3216 GFP_KERNEL);
3217 if (!kern_flow_attr)
3218 return -ENOMEM;
3219
3220 *kern_flow_attr = cmd.flow_attr;
3221 err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
3222 cmd.flow_attr.size);
3223 if (err)
3224 goto err_free_attr;
3225 } else {
3226 kern_flow_attr = &cmd.flow_attr;
3227 }
3228
3229 err = uverbs_request_finish(&iter);
3230 if (err)
3231 goto err_free_attr;
3232
3233 uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
3234 if (IS_ERR(uobj)) {
3235 err = PTR_ERR(uobj);
3236 goto err_free_attr;
3237 }
3238
3239 if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
3240 err = -EINVAL;
3241 goto err_uobj;
3242 }
3243
3244 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
3245 if (!qp) {
3246 err = -EINVAL;
3247 goto err_uobj;
3248 }
3249
3250 if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
3251 err = -EINVAL;
3252 goto err_put;
3253 }
3254
3255 flow_attr = kzalloc(struct_size(flow_attr, flows,
3256 cmd.flow_attr.num_of_specs), GFP_KERNEL);
3257 if (!flow_attr) {
3258 err = -ENOMEM;
3259 goto err_put;
3260 }
3261 uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
3262 if (!uflow_res) {
3263 err = -ENOMEM;
3264 goto err_free_flow_attr;
3265 }
3266
3267 flow_attr->type = kern_flow_attr->type;
3268 flow_attr->priority = kern_flow_attr->priority;
3269 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3270 flow_attr->port = kern_flow_attr->port;
3271 flow_attr->flags = kern_flow_attr->flags;
3272 flow_attr->size = sizeof(*flow_attr);
3273
3274 kern_spec = kern_flow_attr->flow_specs;
3275 ib_spec = flow_attr + 1;
3276 for (i = 0; i < flow_attr->num_of_specs &&
3277 cmd.flow_attr.size >= sizeof(*kern_spec) &&
3278 cmd.flow_attr.size >= kern_spec->size;
3279 i++) {
3280 err = kern_spec_to_ib_spec(
3281 attrs, (struct ib_uverbs_flow_spec *)kern_spec,
3282 ib_spec, uflow_res);
3283 if (err)
3284 goto err_free;
3285
3286 flow_attr->size +=
3287 ((union ib_flow_spec *) ib_spec)->size;
3288 cmd.flow_attr.size -= kern_spec->size;
3289 kern_spec = ((void *)kern_spec) + kern_spec->size;
3290 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3291 }
3292 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3293 pr_warn("create flow failed, flow %d: %u bytes left from uverb cmd\n",
3294 i, cmd.flow_attr.size);
3295 err = -EINVAL;
3296 goto err_free;
3297 }
3298
3299 flow_id = qp->device->ops.create_flow(qp, flow_attr,
3300 &attrs->driver_udata);
3301
3302 if (IS_ERR(flow_id)) {
3303 err = PTR_ERR(flow_id);
3304 goto err_free;
3305 }
3306
3307 ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
3308
3309 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3310 UVERBS_LOOKUP_READ);
3311 kfree(flow_attr);
3312
3313 if (cmd.flow_attr.num_of_specs)
3314 kfree(kern_flow_attr);
3315 uobj_finalize_uobj_create(uobj, attrs);
3316
3317 resp.flow_handle = uobj->id;
3318 return uverbs_response(attrs, &resp, sizeof(resp));
3319
3320 err_free:
3321 ib_uverbs_flow_resources_free(uflow_res);
3322 err_free_flow_attr:
3323 kfree(flow_attr);
3324 err_put:
3325 rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3326 UVERBS_LOOKUP_READ);
3327 err_uobj:
3328 uobj_alloc_abort(uobj, attrs);
3329 err_free_attr:
3330 if (cmd.flow_attr.num_of_specs)
3331 kfree(kern_flow_attr);
3332 return err;
3333 }
3334
3335 static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
3336 {
3337 struct ib_uverbs_destroy_flow cmd;
3338 int ret;
3339
3340 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3341 if (ret)
3342 return ret;
3343
3344 if (cmd.comp_mask)
3345 return -EINVAL;
3346
3347 return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
3348 }
3349
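/*
 * Common SRQ creation path shared by CREATE_SRQ and CREATE_XSRQ; plain SRQs
 * are converted to an ib_uverbs_create_xsrq command by the caller.
 */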
3350 static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3351 struct ib_uverbs_create_xsrq *cmd,
3352 struct ib_udata *udata)
3353 {
3354 struct ib_uverbs_create_srq_resp resp = {};
3355 struct ib_usrq_object *obj;
3356 struct ib_pd *pd;
3357 struct ib_srq *srq;
3358 struct ib_srq_init_attr attr;
3359 int ret;
3360 struct ib_uobject *xrcd_uobj;
3361 struct ib_device *ib_dev;
3362
3363 obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
3364 &ib_dev);
3365 if (IS_ERR(obj))
3366 return PTR_ERR(obj);
3367
3368 if (cmd->srq_type == IB_SRQT_TM)
3369 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3370
3371 if (cmd->srq_type == IB_SRQT_XRC) {
3372 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
3373 attrs);
3374 if (IS_ERR(xrcd_uobj)) {
3375 ret = -EINVAL;
3376 goto err;
3377 }
3378
3379 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3380 if (!attr.ext.xrc.xrcd) {
3381 ret = -EINVAL;
3382 goto err_put_xrcd;
3383 }
3384
3385 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3386 atomic_inc(&obj->uxrcd->refcnt);
3387 }
3388
3389 if (ib_srq_has_cq(cmd->srq_type)) {
3390 attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
3391 cmd->cq_handle, attrs);
3392 if (!attr.ext.cq) {
3393 ret = -EINVAL;
3394 goto err_put_xrcd;
3395 }
3396 }
3397
3398 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
3399 if (!pd) {
3400 ret = -EINVAL;
3401 goto err_put_cq;
3402 }
3403
3404 attr.event_handler = ib_uverbs_srq_event_handler;
3405 attr.srq_type = cmd->srq_type;
3406 attr.attr.max_wr = cmd->max_wr;
3407 attr.attr.max_sge = cmd->max_sge;
3408 attr.attr.srq_limit = cmd->srq_limit;
3409
3410 INIT_LIST_HEAD(&obj->uevent.event_list);
3411 obj->uevent.uobject.user_handle = cmd->user_handle;
3412
3413 srq = ib_create_srq_user(pd, &attr, obj, udata);
3414 if (IS_ERR(srq)) {
3415 ret = PTR_ERR(srq);
3416 goto err_put_pd;
3417 }
3418
3419 obj->uevent.uobject.object = srq;
3420 obj->uevent.uobject.user_handle = cmd->user_handle;
3421 obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
3422 if (obj->uevent.event_file)
3423 uverbs_uobject_get(&obj->uevent.event_file->uobj);
3424
3425 if (cmd->srq_type == IB_SRQT_XRC)
3426 resp.srqn = srq->ext.xrc.srq_num;
3427
3428 if (cmd->srq_type == IB_SRQT_XRC)
3429 uobj_put_read(xrcd_uobj);
3430
3431 if (ib_srq_has_cq(cmd->srq_type))
3432 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3433 UVERBS_LOOKUP_READ);
3434
3435 uobj_put_obj_read(pd);
3436 uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
3437
3438 resp.srq_handle = obj->uevent.uobject.id;
3439 resp.max_wr = attr.attr.max_wr;
3440 resp.max_sge = attr.attr.max_sge;
3441 return uverbs_response(attrs, &resp, sizeof(resp));
3442
3443 err_put_pd:
3444 uobj_put_obj_read(pd);
3445 err_put_cq:
3446 if (ib_srq_has_cq(cmd->srq_type))
3447 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3448 UVERBS_LOOKUP_READ);
3449
3450 err_put_xrcd:
3451 if (cmd->srq_type == IB_SRQT_XRC) {
3452 atomic_dec(&obj->uxrcd->refcnt);
3453 uobj_put_read(xrcd_uobj);
3454 }
3455
3456 err:
3457 uobj_alloc_abort(&obj->uevent.uobject, attrs);
3458 return ret;
3459 }
3460
3461 static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
3462 {
3463 struct ib_uverbs_create_srq cmd;
3464 struct ib_uverbs_create_xsrq xcmd;
3465 int ret;
3466
3467 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3468 if (ret)
3469 return ret;
3470
3471 memset(&xcmd, 0, sizeof(xcmd));
3472 xcmd.response = cmd.response;
3473 xcmd.user_handle = cmd.user_handle;
3474 xcmd.srq_type = IB_SRQT_BASIC;
3475 xcmd.pd_handle = cmd.pd_handle;
3476 xcmd.max_wr = cmd.max_wr;
3477 xcmd.max_sge = cmd.max_sge;
3478 xcmd.srq_limit = cmd.srq_limit;
3479
3480 return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
3481 }
3482
3483 static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
3484 {
3485 struct ib_uverbs_create_xsrq cmd;
3486 int ret;
3487
3488 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3489 if (ret)
3490 return ret;
3491
3492 return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
3493 }
3494
3495 static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
3496 {
3497 struct ib_uverbs_modify_srq cmd;
3498 struct ib_srq *srq;
3499 struct ib_srq_attr attr;
3500 int ret;
3501
3502 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3503 if (ret)
3504 return ret;
3505
3506 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3507 if (!srq)
3508 return -EINVAL;
3509
3510 attr.max_wr = cmd.max_wr;
3511 attr.srq_limit = cmd.srq_limit;
3512
3513 ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
3514 &attrs->driver_udata);
3515
3516 rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3517 UVERBS_LOOKUP_READ);
3518
3519 return ret;
3520 }
3521
3522 static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
3523 {
3524 struct ib_uverbs_query_srq cmd;
3525 struct ib_uverbs_query_srq_resp resp;
3526 struct ib_srq_attr attr;
3527 struct ib_srq *srq;
3528 int ret;
3529
3530 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3531 if (ret)
3532 return ret;
3533
3534 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3535 if (!srq)
3536 return -EINVAL;
3537
3538 ret = ib_query_srq(srq, &attr);
3539
3540 rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3541 UVERBS_LOOKUP_READ);
3542
3543 if (ret)
3544 return ret;
3545
3546 memset(&resp, 0, sizeof resp);
3547
3548 resp.max_wr = attr.max_wr;
3549 resp.max_sge = attr.max_sge;
3550 resp.srq_limit = attr.srq_limit;
3551
3552 return uverbs_response(attrs, &resp, sizeof(resp));
3553 }
3554
3555 static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
3556 {
3557 struct ib_uverbs_destroy_srq cmd;
3558 struct ib_uverbs_destroy_srq_resp resp;
3559 struct ib_uobject *uobj;
3560 struct ib_uevent_object *obj;
3561 int ret;
3562
3563 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3564 if (ret)
3565 return ret;
3566
3567 uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3568 if (IS_ERR(uobj))
3569 return PTR_ERR(uobj);
3570
3571 obj = container_of(uobj, struct ib_uevent_object, uobject);
3572 memset(&resp, 0, sizeof(resp));
3573 resp.events_reported = obj->events_reported;
3574
3575 uobj_put_destroy(uobj);
3576
3577 return uverbs_response(attrs, &resp, sizeof(resp));
3578 }
3579
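/*
 * EX_QUERY_DEVICE: extended device query. The response is written with
 * uverbs_response(), so a caller that passed a shorter buffer simply gets a
 * truncated copy, as indicated by resp.response_length.
 */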
3580 static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
3581 {
3582 struct ib_uverbs_ex_query_device_resp resp = {};
3583 struct ib_uverbs_ex_query_device cmd;
3584 struct ib_device_attr attr = {0};
3585 struct ib_ucontext *ucontext;
3586 struct ib_device *ib_dev;
3587 int err;
3588
3589 ucontext = ib_uverbs_get_ucontext(attrs);
3590 if (IS_ERR(ucontext))
3591 return PTR_ERR(ucontext);
3592 ib_dev = ucontext->device;
3593
3594 err = uverbs_request(attrs, &cmd, sizeof(cmd));
3595 if (err)
3596 return err;
3597
3598 if (cmd.comp_mask)
3599 return -EINVAL;
3600
3601 if (cmd.reserved)
3602 return -EINVAL;
3603
3604 err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
3605 if (err)
3606 return err;
3607
3608 copy_query_dev_fields(ucontext, &resp.base, &attr);
3609
3610 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3611 resp.odp_caps.per_transport_caps.rc_odp_caps =
3612 attr.odp_caps.per_transport_caps.rc_odp_caps;
3613 resp.odp_caps.per_transport_caps.uc_odp_caps =
3614 attr.odp_caps.per_transport_caps.uc_odp_caps;
3615 resp.odp_caps.per_transport_caps.ud_odp_caps =
3616 attr.odp_caps.per_transport_caps.ud_odp_caps;
3617 resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
3618
3619 resp.timestamp_mask = attr.timestamp_mask;
3620 resp.hca_core_clock = attr.hca_core_clock;
3621 resp.device_cap_flags_ex = attr.device_cap_flags;
3622 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3623 resp.rss_caps.max_rwq_indirection_tables =
3624 attr.rss_caps.max_rwq_indirection_tables;
3625 resp.rss_caps.max_rwq_indirection_table_size =
3626 attr.rss_caps.max_rwq_indirection_table_size;
3627 resp.max_wq_type_rq = attr.max_wq_type_rq;
3628 resp.raw_packet_caps = attr.raw_packet_caps;
3629 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
3630 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
3631 resp.tm_caps.max_ops = attr.tm_caps.max_ops;
3632 resp.tm_caps.max_sge = attr.tm_caps.max_sge;
3633 resp.tm_caps.flags = attr.tm_caps.flags;
3634 resp.cq_moderation_caps.max_cq_moderation_count =
3635 attr.cq_caps.max_cq_moderation_count;
3636 resp.cq_moderation_caps.max_cq_moderation_period =
3637 attr.cq_caps.max_cq_moderation_period;
3638 resp.max_dm_size = attr.max_dm_size;
3639 resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3640
3641 return uverbs_response(attrs, &resp, sizeof(resp));
3642 }
3643
3644 static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
3645 {
3646 struct ib_uverbs_ex_modify_cq cmd;
3647 struct ib_cq *cq;
3648 int ret;
3649
3650 ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3651 if (ret)
3652 return ret;
3653
3654 if (!cmd.attr_mask || cmd.reserved)
3655 return -EINVAL;
3656
3657 if (cmd.attr_mask > IB_CQ_MODERATE)
3658 return -EOPNOTSUPP;
3659
3660 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
3661 if (!cq)
3662 return -EINVAL;
3663
3664 ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
3665
3666 rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
3667 UVERBS_LOOKUP_READ);
3668 return ret;
3669 }
3670
/*
 * Helpers for describing the legacy write() commands in the tables below.
 *
 * UAPI_DEF_WRITE_IO() records the fixed request and response sizes and
 * asserts at compile time that the 64 bit 'response' pointer is the first
 * member of the request. The _UDATA variants additionally mark the command
 * as carrying trailing driver-private data and assert that the driver_data
 * flex array starts exactly at the end of the fixed-size structs.
 */
3680 #define UAPI_DEF_WRITE_IO(req, resp) \
3681 .write.has_resp = 1 + \
3682 BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
3683 BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \
3684 sizeof(u64)), \
3685 .write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
3686
3687 #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
3688
3689 #define UAPI_DEF_WRITE_UDATA_IO(req, resp) \
3690 UAPI_DEF_WRITE_IO(req, resp), \
3691 .write.has_udata = \
3692 1 + \
3693 BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
3694 sizeof(req)) + \
3695 BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) != \
3696 sizeof(resp))
3697
3698 #define UAPI_DEF_WRITE_UDATA_I(req) \
3699 UAPI_DEF_WRITE_I(req), \
3700 .write.has_udata = \
3701 1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \
3702 sizeof(req))
3703
/*
 * The _EX variants describe the extensible commands: request and response
 * sizes are computed with offsetofend() of the named last member rather
 * than sizeof(), since these structures may grow over time.
 */
3708 #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member) \
3709 .write.has_resp = 1, \
3710 .write.req_size = offsetofend(req, req_last_member), \
3711 .write.resp_size = offsetofend(resp, resp_last_member)
3712
3713 #define UAPI_DEF_WRITE_I_EX(req, req_last_member) \
3714 .write.req_size = offsetofend(req, req_last_member)
3715
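/*
 * Dispatch table for the legacy write() and extended write() commands. Each
 * entry ties a command to its handler and to the driver ops it needs; the
 * uapi core hides commands whose required ops a driver does not implement.
 */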
3716 const struct uapi_definition uverbs_def_write_intf[] = {
3717 DECLARE_UVERBS_OBJECT(
3718 UVERBS_OBJECT_AH,
3719 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
3720 ib_uverbs_create_ah,
3721 UAPI_DEF_WRITE_UDATA_IO(
3722 struct ib_uverbs_create_ah,
3723 struct ib_uverbs_create_ah_resp)),
3724 DECLARE_UVERBS_WRITE(
3725 IB_USER_VERBS_CMD_DESTROY_AH,
3726 ib_uverbs_destroy_ah,
3727 UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah)),
3728 UAPI_DEF_OBJ_NEEDS_FN(create_user_ah),
3729 UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
3730
3731 DECLARE_UVERBS_OBJECT(
3732 UVERBS_OBJECT_COMP_CHANNEL,
3733 DECLARE_UVERBS_WRITE(
3734 IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
3735 ib_uverbs_create_comp_channel,
3736 UAPI_DEF_WRITE_IO(
3737 struct ib_uverbs_create_comp_channel,
3738 struct ib_uverbs_create_comp_channel_resp))),
3739
3740 DECLARE_UVERBS_OBJECT(
3741 UVERBS_OBJECT_CQ,
3742 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
3743 ib_uverbs_create_cq,
3744 UAPI_DEF_WRITE_UDATA_IO(
3745 struct ib_uverbs_create_cq,
3746 struct ib_uverbs_create_cq_resp),
3747 UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3748 DECLARE_UVERBS_WRITE(
3749 IB_USER_VERBS_CMD_DESTROY_CQ,
3750 ib_uverbs_destroy_cq,
3751 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
3752 struct ib_uverbs_destroy_cq_resp),
3753 UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
3754 DECLARE_UVERBS_WRITE(
3755 IB_USER_VERBS_CMD_POLL_CQ,
3756 ib_uverbs_poll_cq,
3757 UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
3758 struct ib_uverbs_poll_cq_resp),
3759 UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
3760 DECLARE_UVERBS_WRITE(
3761 IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
3762 ib_uverbs_req_notify_cq,
3763 UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
3764 UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
3765 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
3766 ib_uverbs_resize_cq,
3767 UAPI_DEF_WRITE_UDATA_IO(
3768 struct ib_uverbs_resize_cq,
3769 struct ib_uverbs_resize_cq_resp),
3770 UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
3771 DECLARE_UVERBS_WRITE_EX(
3772 IB_USER_VERBS_EX_CMD_CREATE_CQ,
3773 ib_uverbs_ex_create_cq,
3774 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
3775 reserved,
3776 struct ib_uverbs_ex_create_cq_resp,
3777 response_length),
3778 UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3779 DECLARE_UVERBS_WRITE_EX(
3780 IB_USER_VERBS_EX_CMD_MODIFY_CQ,
3781 ib_uverbs_ex_modify_cq,
3782 UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
3783 UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
3784
3785 DECLARE_UVERBS_OBJECT(
3786 UVERBS_OBJECT_DEVICE,
3787 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
3788 ib_uverbs_get_context,
3789 UAPI_DEF_WRITE_UDATA_IO(
3790 struct ib_uverbs_get_context,
3791 struct ib_uverbs_get_context_resp)),
3792 DECLARE_UVERBS_WRITE(
3793 IB_USER_VERBS_CMD_QUERY_DEVICE,
3794 ib_uverbs_query_device,
3795 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
3796 struct ib_uverbs_query_device_resp)),
3797 DECLARE_UVERBS_WRITE(
3798 IB_USER_VERBS_CMD_QUERY_PORT,
3799 ib_uverbs_query_port,
3800 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
3801 struct ib_uverbs_query_port_resp),
3802 UAPI_DEF_METHOD_NEEDS_FN(query_port)),
3803 DECLARE_UVERBS_WRITE_EX(
3804 IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
3805 ib_uverbs_ex_query_device,
3806 UAPI_DEF_WRITE_IO_EX(
3807 struct ib_uverbs_ex_query_device,
3808 reserved,
3809 struct ib_uverbs_ex_query_device_resp,
3810 response_length),
3811 UAPI_DEF_METHOD_NEEDS_FN(query_device)),
3812 UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
3813 UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
3814
3815 DECLARE_UVERBS_OBJECT(
3816 UVERBS_OBJECT_FLOW,
3817 DECLARE_UVERBS_WRITE_EX(
3818 IB_USER_VERBS_EX_CMD_CREATE_FLOW,
3819 ib_uverbs_ex_create_flow,
3820 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
3821 flow_attr,
3822 struct ib_uverbs_create_flow_resp,
3823 flow_handle),
3824 UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
3825 DECLARE_UVERBS_WRITE_EX(
3826 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
3827 ib_uverbs_ex_destroy_flow,
3828 UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
3829 UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
3830
3831 DECLARE_UVERBS_OBJECT(
3832 UVERBS_OBJECT_MR,
3833 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
3834 ib_uverbs_dereg_mr,
3835 UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
3836 UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
3837 DECLARE_UVERBS_WRITE(
3838 IB_USER_VERBS_CMD_REG_MR,
3839 ib_uverbs_reg_mr,
3840 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
3841 struct ib_uverbs_reg_mr_resp),
3842 UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
3843 DECLARE_UVERBS_WRITE(
3844 IB_USER_VERBS_CMD_REREG_MR,
3845 ib_uverbs_rereg_mr,
3846 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
3847 struct ib_uverbs_rereg_mr_resp),
3848 UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
3849
3850 DECLARE_UVERBS_OBJECT(
3851 UVERBS_OBJECT_MW,
3852 DECLARE_UVERBS_WRITE(
3853 IB_USER_VERBS_CMD_ALLOC_MW,
3854 ib_uverbs_alloc_mw,
3855 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
3856 struct ib_uverbs_alloc_mw_resp),
3857 UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
3858 DECLARE_UVERBS_WRITE(
3859 IB_USER_VERBS_CMD_DEALLOC_MW,
3860 ib_uverbs_dealloc_mw,
3861 UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
3862 UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
3863
3864 DECLARE_UVERBS_OBJECT(
3865 UVERBS_OBJECT_PD,
3866 DECLARE_UVERBS_WRITE(
3867 IB_USER_VERBS_CMD_ALLOC_PD,
3868 ib_uverbs_alloc_pd,
3869 UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
3870 struct ib_uverbs_alloc_pd_resp),
3871 UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
3872 DECLARE_UVERBS_WRITE(
3873 IB_USER_VERBS_CMD_DEALLOC_PD,
3874 ib_uverbs_dealloc_pd,
3875 UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
3876 UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
3877
3878 DECLARE_UVERBS_OBJECT(
3879 UVERBS_OBJECT_QP,
3880 DECLARE_UVERBS_WRITE(
3881 IB_USER_VERBS_CMD_ATTACH_MCAST,
3882 ib_uverbs_attach_mcast,
3883 UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
3884 UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
3885 UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3886 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
3887 ib_uverbs_create_qp,
3888 UAPI_DEF_WRITE_UDATA_IO(
3889 struct ib_uverbs_create_qp,
3890 struct ib_uverbs_create_qp_resp),
3891 UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3892 DECLARE_UVERBS_WRITE(
3893 IB_USER_VERBS_CMD_DESTROY_QP,
3894 ib_uverbs_destroy_qp,
3895 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
3896 struct ib_uverbs_destroy_qp_resp),
3897 UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
3898 DECLARE_UVERBS_WRITE(
3899 IB_USER_VERBS_CMD_DETACH_MCAST,
3900 ib_uverbs_detach_mcast,
3901 UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
3902 UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3903 DECLARE_UVERBS_WRITE(
3904 IB_USER_VERBS_CMD_MODIFY_QP,
3905 ib_uverbs_modify_qp,
3906 UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
3907 UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
3908 DECLARE_UVERBS_WRITE(
3909 IB_USER_VERBS_CMD_POST_RECV,
3910 ib_uverbs_post_recv,
3911 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
3912 struct ib_uverbs_post_recv_resp),
3913 UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
3914 DECLARE_UVERBS_WRITE(
3915 IB_USER_VERBS_CMD_POST_SEND,
3916 ib_uverbs_post_send,
3917 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
3918 struct ib_uverbs_post_send_resp),
3919 UAPI_DEF_METHOD_NEEDS_FN(post_send)),
3920 DECLARE_UVERBS_WRITE(
3921 IB_USER_VERBS_CMD_QUERY_QP,
3922 ib_uverbs_query_qp,
3923 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
3924 struct ib_uverbs_query_qp_resp),
3925 UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
3926 DECLARE_UVERBS_WRITE_EX(
3927 IB_USER_VERBS_EX_CMD_CREATE_QP,
3928 ib_uverbs_ex_create_qp,
3929 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
3930 comp_mask,
3931 struct ib_uverbs_ex_create_qp_resp,
3932 response_length),
3933 UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3934 DECLARE_UVERBS_WRITE_EX(
3935 IB_USER_VERBS_EX_CMD_MODIFY_QP,
3936 ib_uverbs_ex_modify_qp,
3937 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
3938 base,
3939 struct ib_uverbs_ex_modify_qp_resp,
3940 response_length),
3941 UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
3942
3943 DECLARE_UVERBS_OBJECT(
3944 UVERBS_OBJECT_RWQ_IND_TBL,
3945 DECLARE_UVERBS_WRITE_EX(
3946 IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
3947 ib_uverbs_ex_create_rwq_ind_table,
3948 UAPI_DEF_WRITE_IO_EX(
3949 struct ib_uverbs_ex_create_rwq_ind_table,
3950 log_ind_tbl_size,
3951 struct ib_uverbs_ex_create_rwq_ind_table_resp,
3952 ind_tbl_num),
3953 UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
3954 DECLARE_UVERBS_WRITE_EX(
3955 IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
3956 ib_uverbs_ex_destroy_rwq_ind_table,
3957 UAPI_DEF_WRITE_I(
3958 struct ib_uverbs_ex_destroy_rwq_ind_table),
3959 UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
3960
3961 DECLARE_UVERBS_OBJECT(
3962 UVERBS_OBJECT_WQ,
3963 DECLARE_UVERBS_WRITE_EX(
3964 IB_USER_VERBS_EX_CMD_CREATE_WQ,
3965 ib_uverbs_ex_create_wq,
3966 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
3967 max_sge,
3968 struct ib_uverbs_ex_create_wq_resp,
3969 wqn),
3970 UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
3971 DECLARE_UVERBS_WRITE_EX(
3972 IB_USER_VERBS_EX_CMD_DESTROY_WQ,
3973 ib_uverbs_ex_destroy_wq,
3974 UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
3975 wq_handle,
3976 struct ib_uverbs_ex_destroy_wq_resp,
3977 reserved),
3978 UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
3979 DECLARE_UVERBS_WRITE_EX(
3980 IB_USER_VERBS_EX_CMD_MODIFY_WQ,
3981 ib_uverbs_ex_modify_wq,
3982 UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
3983 curr_wq_state),
3984 UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
3985
3986 DECLARE_UVERBS_OBJECT(
3987 UVERBS_OBJECT_SRQ,
3988 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
3989 ib_uverbs_create_srq,
3990 UAPI_DEF_WRITE_UDATA_IO(
3991 struct ib_uverbs_create_srq,
3992 struct ib_uverbs_create_srq_resp),
3993 UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
3994 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
3995 ib_uverbs_create_xsrq,
3996 UAPI_DEF_WRITE_UDATA_IO(
3997 struct ib_uverbs_create_xsrq,
3998 struct ib_uverbs_create_srq_resp),
3999 UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
4000 DECLARE_UVERBS_WRITE(
4001 IB_USER_VERBS_CMD_DESTROY_SRQ,
4002 ib_uverbs_destroy_srq,
4003 UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
4004 struct ib_uverbs_destroy_srq_resp),
4005 UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
4006 DECLARE_UVERBS_WRITE(
4007 IB_USER_VERBS_CMD_MODIFY_SRQ,
4008 ib_uverbs_modify_srq,
4009 UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
4010 UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
4011 DECLARE_UVERBS_WRITE(
4012 IB_USER_VERBS_CMD_POST_SRQ_RECV,
4013 ib_uverbs_post_srq_recv,
4014 UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
4015 struct ib_uverbs_post_srq_recv_resp),
4016 UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
4017 DECLARE_UVERBS_WRITE(
4018 IB_USER_VERBS_CMD_QUERY_SRQ,
4019 ib_uverbs_query_srq,
4020 UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
4021 struct ib_uverbs_query_srq_resp),
4022 UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
4023
4024 DECLARE_UVERBS_OBJECT(
4025 UVERBS_OBJECT_XRCD,
4026 DECLARE_UVERBS_WRITE(
4027 IB_USER_VERBS_CMD_CLOSE_XRCD,
4028 ib_uverbs_close_xrcd,
4029 UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd)),
4030 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
4031 ib_uverbs_open_qp,
4032 UAPI_DEF_WRITE_UDATA_IO(
4033 struct ib_uverbs_open_qp,
4034 struct ib_uverbs_create_qp_resp)),
4035 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
4036 ib_uverbs_open_xrcd,
4037 UAPI_DEF_WRITE_UDATA_IO(
4038 struct ib_uverbs_open_xrcd,
4039 struct ib_uverbs_open_xrcd_resp)),
4040 UAPI_DEF_OBJ_NEEDS_FN(alloc_xrcd),
4041 UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
4042
4043 {},
4044 };