Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) 2016 Hisilicon Limited.
0003  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
0004  *
0005  * This software is available to you under a choice of one of two
0006  * licenses.  You may choose to be licensed under the terms of the GNU
0007  * General Public License (GPL) Version 2, available from the file
0008  * COPYING in the main directory of this source tree, or the
0009  * OpenIB.org BSD license below:
0010  *
0011  *     Redistribution and use in source and binary forms, with or
0012  *     without modification, are permitted provided that the following
0013  *     conditions are met:
0014  *
0015  *      - Redistributions of source code must retain the above
0016  *        copyright notice, this list of conditions and the following
0017  *        disclaimer.
0018  *
0019  *      - Redistributions in binary form must reproduce the above
0020  *        copyright notice, this list of conditions and the following
0021  *        disclaimer in the documentation and/or other materials
0022  *        provided with the distribution.
0023  *
0024  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0025  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0026  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
0027  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
0028  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
0029  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
0030  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
0031  * SOFTWARE.
0032  */
0033 
0034 #include <linux/vmalloc.h>
0035 #include <rdma/ib_umem.h>
0036 #include "hns_roce_device.h"
0037 
0038 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
0039 {
0040     struct hns_roce_buf_list *trunks;
0041     u32 i;
0042 
0043     if (!buf)
0044         return;
0045 
0046     trunks = buf->trunk_list;
0047     if (trunks) {
0048         buf->trunk_list = NULL;
0049         for (i = 0; i < buf->ntrunks; i++)
0050             dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
0051                       trunks[i].buf, trunks[i].map);
0052 
0053         kfree(trunks);
0054     }
0055 
0056     kfree(buf);
0057 }
0058 
/*
 * Allocate the dma buffer for storing ROCEE table entries
 *
 * @size: required size
 * @page_shift: the unit size in a continuous dma address range
 * @flags: HNS_ROCE_BUF_ flags to control the allocation flow.
 *
 * Returns the new buffer descriptor on success, or an ERR_PTR() on failure.
 * The buffer is carved into "trunks" of DMA-coherent memory; with
 * HNS_ROCE_BUF_DIRECT the whole size is one trunk, otherwise one trunk per
 * hw page. With HNS_ROCE_BUF_NOFAIL a partial allocation (at least one
 * trunk) is accepted. Free with hns_roce_buf_free().
 */
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags)
{
	u32 trunk_size, page_size, alloced_size;
	struct hns_roce_buf_list *trunks;
	struct hns_roce_buf *buf;
	gfp_t gfp_flags;
	u32 ntrunk, i;

	/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
	if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
		return ERR_PTR(-EINVAL);

	/* NOSLEEP callers run in atomic context and must not block */
	gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
	buf = kzalloc(sizeof(*buf), gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->page_shift = page_shift;
	page_size = 1 << buf->page_shift;

	/* Calc the trunk size and num by required size and page_shift */
	if (flags & HNS_ROCE_BUF_DIRECT) {
		/* one physically-contiguous trunk covering the whole buffer */
		buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
		ntrunk = 1;
	} else {
		/* one trunk per hw page (rounded up to the system page size) */
		buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
		ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
	}

	trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
	if (!trunks) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	trunk_size = 1 << buf->trunk_shift;
	alloced_size = 0;
	for (i = 0; i < ntrunk; i++) {
		trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
						   &trunks[i].map, gfp_flags);
		if (!trunks[i].buf)
			break;

		alloced_size += trunk_size;
	}

	/* i is the number of trunks actually allocated (may be < ntrunk) */
	buf->ntrunks = i;

	/* In nofail mode, it's only failed when the alloced size is 0 */
	if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, trunk_size,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* npages reflects what was really obtained, not what was requested */
	buf->npages = DIV_ROUND_UP(alloced_size, page_size);
	buf->trunk_list = trunks;

	return buf;
}
0131 
0132 int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
0133                int buf_cnt, struct hns_roce_buf *buf,
0134                unsigned int page_shift)
0135 {
0136     unsigned int offset, max_size;
0137     int total = 0;
0138     int i;
0139 
0140     if (page_shift > buf->trunk_shift) {
0141         dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
0142             page_shift, buf->trunk_shift);
0143         return -EINVAL;
0144     }
0145 
0146     offset = 0;
0147     max_size = buf->ntrunks << buf->trunk_shift;
0148     for (i = 0; i < buf_cnt && offset < max_size; i++) {
0149         bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
0150         offset += (1 << page_shift);
0151     }
0152 
0153     return total;
0154 }
0155 
0156 int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
0157                int buf_cnt, struct ib_umem *umem,
0158                unsigned int page_shift)
0159 {
0160     struct ib_block_iter biter;
0161     int total = 0;
0162 
0163     /* convert system page cnt to hw page cnt */
0164     rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
0165         bufs[total++] = rdma_block_iter_dma_address(&biter);
0166         if (total >= buf_cnt)
0167             goto done;
0168     }
0169 
0170 done:
0171     return total;
0172 }
0173 
/* Tear down all resource-ID allocators (and the qp/cq tables) of the device,
 * in the reverse order of their setup. Optional allocators (XRCD, SRQ) are
 * only destroyed when the corresponding capability flag is set.
 */
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
	ida_destroy(&hr_dev->pd_ida.ida);
	ida_destroy(&hr_dev->uar_ida.ida);
}