#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

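/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */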
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}

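/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */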
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	/* Pin the userspace pages backing this region. */
	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	/* Mirror the pinned pages into a page directory the device can walk. */
	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}

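/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */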
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Page-address array filled by pvrdma_set_page() at map time. */
	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}

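/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 * @udata: pointer to user data
 *
 * @return: 0 on success.
 */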
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	/* Release local resources even if the destroy command failed. */
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}

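/* ib_sg_to_pages() callback: record the next page address of the region. */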
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

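/**
 * pvrdma_map_mr_sg - map a scatterlist into the memory region's page list
 * @ibmr: memory region
 * @sg: scatterlist to map
 * @sg_nents: number of entries in @sg
 * @sg_offset: offset into @sg; updated by ib_sg_to_pages()
 *
 * @return: number of sg entries mapped, otherwise a negative errno.
 */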
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}