0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #include <linux/vmalloc.h>
0035 #include <rdma/ib_umem.h>
0036 #include "hns_roce_device.h"
0037
0038 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
0039 {
0040 struct hns_roce_buf_list *trunks;
0041 u32 i;
0042
0043 if (!buf)
0044 return;
0045
0046 trunks = buf->trunk_list;
0047 if (trunks) {
0048 buf->trunk_list = NULL;
0049 for (i = 0; i < buf->ntrunks; i++)
0050 dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
0051 trunks[i].buf, trunks[i].map);
0052
0053 kfree(trunks);
0054 }
0055
0056 kfree(buf);
0057 }
0058
0059
0060
0061
0062
0063
0064
0065
0066 struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
0067 u32 page_shift, u32 flags)
0068 {
0069 u32 trunk_size, page_size, alloced_size;
0070 struct hns_roce_buf_list *trunks;
0071 struct hns_roce_buf *buf;
0072 gfp_t gfp_flags;
0073 u32 ntrunk, i;
0074
0075
0076 if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
0077 return ERR_PTR(-EINVAL);
0078
0079 gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
0080 buf = kzalloc(sizeof(*buf), gfp_flags);
0081 if (!buf)
0082 return ERR_PTR(-ENOMEM);
0083
0084 buf->page_shift = page_shift;
0085 page_size = 1 << buf->page_shift;
0086
0087
0088 if (flags & HNS_ROCE_BUF_DIRECT) {
0089 buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
0090 ntrunk = 1;
0091 } else {
0092 buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
0093 ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
0094 }
0095
0096 trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
0097 if (!trunks) {
0098 kfree(buf);
0099 return ERR_PTR(-ENOMEM);
0100 }
0101
0102 trunk_size = 1 << buf->trunk_shift;
0103 alloced_size = 0;
0104 for (i = 0; i < ntrunk; i++) {
0105 trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
0106 &trunks[i].map, gfp_flags);
0107 if (!trunks[i].buf)
0108 break;
0109
0110 alloced_size += trunk_size;
0111 }
0112
0113 buf->ntrunks = i;
0114
0115
0116 if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
0117 for (i = 0; i < buf->ntrunks; i++)
0118 dma_free_coherent(hr_dev->dev, trunk_size,
0119 trunks[i].buf, trunks[i].map);
0120
0121 kfree(trunks);
0122 kfree(buf);
0123 return ERR_PTR(-ENOMEM);
0124 }
0125
0126 buf->npages = DIV_ROUND_UP(alloced_size, page_size);
0127 buf->trunk_list = trunks;
0128
0129 return buf;
0130 }
0131
0132 int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
0133 int buf_cnt, struct hns_roce_buf *buf,
0134 unsigned int page_shift)
0135 {
0136 unsigned int offset, max_size;
0137 int total = 0;
0138 int i;
0139
0140 if (page_shift > buf->trunk_shift) {
0141 dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
0142 page_shift, buf->trunk_shift);
0143 return -EINVAL;
0144 }
0145
0146 offset = 0;
0147 max_size = buf->ntrunks << buf->trunk_shift;
0148 for (i = 0; i < buf_cnt && offset < max_size; i++) {
0149 bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
0150 offset += (1 << page_shift);
0151 }
0152
0153 return total;
0154 }
0155
0156 int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
0157 int buf_cnt, struct ib_umem *umem,
0158 unsigned int page_shift)
0159 {
0160 struct ib_block_iter biter;
0161 int total = 0;
0162
0163
0164 rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
0165 bufs[total++] = rdma_block_iter_dma_address(&biter);
0166 if (total >= buf_cnt)
0167 goto done;
0168 }
0169
0170 done:
0171 return total;
0172 }
0173
/* Tear down the device's resource-ID allocators and dependent tables.
 *
 * Called on device teardown. The statement order is deliberate: QP and
 * CQ tables are cleaned up before the MR/PD/UAR ID allocators they may
 * reference are destroyed — do not reorder.
 */
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	/* XRCD IDs only exist when the XRC capability was negotiated. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	/* Likewise, SRQ IDs only exist with SRQ support. */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
	ida_destroy(&hr_dev->pd_ida.ida);
	ida_destroy(&hr_dev->uar_ida.ida);
}