/*
 * Copyright (c) 2016-2017 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
	struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
	cmd->srq_handle = srq->srq_handle;

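	/* Ask the device for the SRQ's current attributes. */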
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not query shared receive queue, error: %d\n",
			 ret);
		return -EINVAL;
	}

	srq_attr->srq_limit = resp->attrs.srq_limit;
	srq_attr->max_wr = resp->attrs.max_wr;
	srq_attr->max_sge = resp->attrs.max_sge;

	return 0;
}

/**
 * pvrdma_create_srq - create shared receive queue
 * @ibsrq: the IB shared receive queue
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct pvrdma_srq *srq = to_vsrq(ibsrq);
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
	struct pvrdma_create_srq_resp srq_resp = {};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;

	if (!udata) {
		/* No support for kernel clients. */
		dev_warn(&dev->pdev->dev,
			 "no shared receive queue support for kernel client\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->srq_type != IB_SRQT_BASIC) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue type %d not supported\n",
			 init_attr->srq_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
	    init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
		dev_warn(&dev->pdev->dev,
			 "shared receive queue size invalid\n");
		return -EINVAL;
	}

	if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
		return -ENOMEM;

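	/*
	 * The refcount/completion pair lets pvrdma_free_srq() block until
	 * all outstanding references to this SRQ have been dropped.
	 */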
	spin_lock_init(&srq->lock);
	refcount_set(&srq->refcnt, 1);
	init_completion(&srq->free);

	dev_dbg(&dev->pdev->dev,
		"create shared receive queue from user space\n");

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
		ret = -EFAULT;
		goto err_srq;
	}

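	/* Pin and map the userspace buffer that backs the SRQ ring. */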
	srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
	if (IS_ERR(srq->umem)) {
		ret = PTR_ERR(srq->umem);
		goto err_srq;
	}

	srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);

	if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in shared receive queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

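	/* Build the device-visible page directory over the pinned pages. */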
	ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

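	/* Fill in the CREATE_SRQ command and post it to the device. */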
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
	cmd->srq_type = init_attr->srq_type;
	cmd->nchunks = srq->npages;
	cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
	cmd->attrs.max_wr = init_attr->attr.max_wr;
	cmd->attrs.max_sge = init_attr->attr.max_sge;
	cmd->attrs.srq_limit = init_attr->attr.srq_limit;
	cmd->pdir_dma = srq->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create shared receive queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

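	/* Record the handle and make the SRQ visible in the handle table. */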
	srq->srq_handle = resp->srqn;
	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	/* Copy udata back. */
	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq, udata);
		return -EINVAL;
	}

	return 0;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
	ib_umem_release(srq->umem);
err_srq:
	atomic_dec(&dev->num_srqs);

	return ret;
}

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = NULL;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (refcount_dec_and_test(&srq->refcnt))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	/* There is no support for kernel clients, so this is safe. */
	ib_umem_release(srq->umem);

	pvrdma_page_dir_cleanup(dev, &srq->pdir);

	atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 * @udata: user data or null for kernel object
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(srq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
	struct pvrdma_dev *dev = to_vdev(srq->device);
	int ret;

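	/*
	 * Ask the device to destroy the SRQ; host-side teardown proceeds
	 * even if the command fails.
	 */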
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "destroy shared receive queue failed, error: %d\n",
			 ret);

	pvrdma_free_srq(dev, vsrq);
	return 0;
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
	struct pvrdma_dev *dev = to_vdev(ibsrq->device);
	int ret;

	/* Only support the SRQ limit attribute. */
	if (!(attr_mask & IB_SRQ_LIMIT))
		return -EINVAL;

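	/* Post the MODIFY_SRQ command carrying the new limit. */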
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
	cmd->srq_handle = vsrq->srq_handle;
	cmd->attrs.srq_limit = attr->srq_limit;
	cmd->attr_mask = attr_mask;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not modify shared receive queue, error: %d\n",
			 ret);

		return -EINVAL;
	}

	return ret;
}