/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise a negative errno
 * encoded with ERR_PTR().
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}
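
/*
 * Illustrative sketch, not part of the driver (the helper name below is
 * hypothetical): the IB core invokes the get_dma_mr verb from
 * ib_alloc_pd() when a device does not advertise
 * IB_DEVICE_LOCAL_DMA_LKEY, so a ULP normally sees the MR created above
 * only through pd->local_dma_lkey. Errors are unpacked with
 * IS_ERR()/PTR_ERR() since they are encoded via ERR_PTR().
 */
static inline int example_pd_dma_lkey(struct ib_device *ibdev)
{
	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* pd->local_dma_lkey is backed by the DMA MR created above. */
	pr_info("local dma lkey: %#x\n", pd->local_dma_lkey);

	ib_dealloc_pd(pd);
	return 0;
}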

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise a negative errno
 * encoded with ERR_PTR().
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}
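
/*
 * Illustrative userspace sketch, not part of this file: registration
 * requests reach pvrdma_reg_user_mr() from libibverbs' ibv_reg_mr()
 * through the uverbs layer. A minimal caller, assuming an already
 * allocated struct ibv_pd, might look like this:
 *
 *	#include <stdlib.h>
 *	#include <infiniband/verbs.h>
 *
 *	void *buf = calloc(1, len);
 *	struct ibv_mr *mr;
 *
 *	// Fails (NULL, errno set) if len is 0 or exceeds the device's
 *	// max_mr_size, per the length check above.
 *	mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *	if (!mr)
 *		exit(1);
 *	...
 *	ibv_dereg_mr(mr);
 *	free(buf);
 */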

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise a negative errno
 * encoded with ERR_PTR().
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}
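
/*
 * Illustrative sketch, not part of the driver (the helper name below is
 * hypothetical): kernel ULPs reach pvrdma_alloc_mr() through
 * ib_alloc_mr(). The requested page count must stay within
 * PVRDMA_MAX_FAST_REG_PAGES or the call fails with -EINVAL.
 */
static inline struct ib_mr *example_alloc_frmr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16 /* max_num_sg */);
}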

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 * @udata: pointer to user data
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}

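/*
 * Illustrative sketch, not part of the driver (the helper name below is
 * hypothetical): teardown goes through ib_dereg_mr(), which dispatches
 * to pvrdma_dereg_mr() and releases the umem, page directory and device
 * handle in one call.
 */
static inline void example_release_mr(struct ib_mr *mr)
{
	int ret = ib_dereg_mr(mr);

	if (ret)
		pr_warn("dereg_mr failed: %d\n", ret);
}

/* Page-list fill callback passed to ib_sg_to_pages(): records one DMA
 * page address per invocation, up to max_pages. */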
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}
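
/*
 * Illustrative sketch, not part of the driver (the helper name below is
 * hypothetical): a ULP typically pairs ib_map_mr_sg(), which reaches
 * pvrdma_map_mr_sg() and calls pvrdma_set_page() once per page, with an
 * IB_WR_REG_MR work request to perform the fast registration. This is
 * simplified; real callers usually also refresh the key with
 * ib_update_fast_reg_key() before reuse.
 */
static inline int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				   struct scatterlist *sg, int sg_nents)
{
	const struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr = {};
	int n;

	/* Map the scatterlist into the MR's page list. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n != sg_nents)
		return n < 0 ? n : -EINVAL;

	/* Post the registration work request. */
	wr.wr.opcode = IB_WR_REG_MR;
	wr.mr = mr;
	wr.key = mr->rkey;
	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}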