#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "pvrdma.h"

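/*
 * Allocate a two-level page directory: a single directory page whose
 * entries hold the DMA addresses of the table pages, and table pages
 * whose entries hold the DMA addresses of the individual data pages.
 * When @alloc_pages is true the data pages are allocated here as well;
 * otherwise callers fill the entries later via the insert helpers below.
 * On allocation failure, any partially built state is released through
 * pvrdma_page_dir_cleanup() and -ENOMEM is returned.
 */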
int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages)
{
	u64 i;

	if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
		return -EINVAL;

	memset(pdir, 0, sizeof(*pdir));

	pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
				       &pdir->dir_dma, GFP_KERNEL);
	if (!pdir->dir)
		goto err;

	pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
	pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
			       GFP_KERNEL);
	if (!pdir->tables)
		goto err;

	for (i = 0; i < pdir->ntables; i++) {
		pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						     (dma_addr_t *)&pdir->dir[i],
						     GFP_KERNEL);
		if (!pdir->tables[i])
			goto err;
	}

	pdir->npages = npages;

	if (alloc_pages) {
		pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
				      GFP_KERNEL);
		if (!pdir->pages)
			goto err;

		for (i = 0; i < pdir->npages; i++) {
			dma_addr_t page_dma;

			pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
							    PAGE_SIZE,
							    &page_dma,
							    GFP_KERNEL);
			if (!pdir->pages[i])
				goto err;

			pvrdma_page_dir_insert_dma(pdir, i, page_dma);
		}
	}

	return 0;

err:
	pvrdma_page_dir_cleanup(dev, pdir);

	return -ENOMEM;
}

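/* Return the table page holding the entry for page index @idx. */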
static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
{
	return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
}

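/* Look up the DMA address recorded for page index @idx. */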
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
{
	return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
}

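/* Free the data pages that were allocated by pvrdma_page_dir_init(). */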
static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
					  struct pvrdma_page_dir *pdir)
{
	if (pdir->pages) {
		u64 i;

		for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
			dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);

			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  pdir->pages[i], page_dma);
		}

		kfree(pdir->pages);
	}
}

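/* Free the data pages (if owned) followed by the table pages. */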
static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
					   struct pvrdma_page_dir *pdir)
{
	if (pdir->tables) {
		int i;

		pvrdma_page_dir_cleanup_pages(dev, pdir);

		for (i = 0; i < pdir->ntables; i++) {
			u64 *table = pdir->tables[i];

			if (table)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  table, pdir->dir[i]);
		}

		kfree(pdir->tables);
	}
}

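/* Tear down the whole page directory, including the directory page itself. */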
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir)
{
	if (pdir->dir) {
		pvrdma_page_dir_cleanup_tables(dev, pdir);
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  pdir->dir, pdir->dir_dma);
	}
}

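/* Record DMA address @daddr in the entry for page index @idx. */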
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr)
{
	u64 *table;

	if (idx >= pdir->npages)
		return -EINVAL;

	table = pvrdma_page_dir_table(pdir, idx);
	table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;

	return 0;
}

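/*
 * Fill consecutive entries, starting at @offset, with the DMA address of
 * each PAGE_SIZE block of a user memory region.
 */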
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset)
{
	struct ib_block_iter biter;
	u64 i = offset;
	int ret = 0;

	if (offset >= pdir->npages)
		return -EINVAL;

	rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
		ret = pvrdma_page_dir_insert_dma(
			pdir, i, rdma_block_iter_dma_address(&biter));
		if (ret)
			goto exit;

		i++;
	}

exit:
	return ret;
}

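/* Fill the first @num_pages entries from an array of DMA addresses. */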
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list,
				     int num_pages)
{
	int i;
	int ret;

	if (num_pages > pdir->npages)
		return -EINVAL;

	for (i = 0; i < num_pages; i++) {
		ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
		if (ret)
			return ret;
	}

	return 0;
}

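/* Copy QP capability limits between the IB core and PVRDMA formats. */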
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
{
	dst->max_send_wr = src->max_send_wr;
	dst->max_recv_wr = src->max_recv_wr;
	dst->max_send_sge = src->max_send_sge;
	dst->max_recv_sge = src->max_recv_sge;
	dst->max_inline_data = src->max_inline_data;
}

void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
{
	dst->max_send_wr = src->max_send_wr;
	dst->max_recv_wr = src->max_recv_wr;
	dst->max_send_sge = src->max_send_sge;
	dst->max_recv_sge = src->max_recv_sge;
	dst->max_inline_data = src->max_inline_data;
}

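/* GIDs have identical size in both formats and are copied verbatim. */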
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
{
	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
	memcpy(dst, src, sizeof(*src));
}

void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
{
	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
	memcpy(dst, src, sizeof(*src));
}

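/* Convert global routing (GRH) fields between the two representations. */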
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src)
{
	pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
	dst->flow_label = src->flow_label;
	dst->sgid_index = src->sgid_index;
	dst->hop_limit = src->hop_limit;
	dst->traffic_class = src->traffic_class;
}

void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src)
{
	ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
	dst->flow_label = src->flow_label;
	dst->sgid_index = src->sgid_index;
	dst->hop_limit = src->hop_limit;
	dst->traffic_class = src->traffic_class;
}

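/*
 * Convert address handle attributes.  The resulting rdma_ah_attr is
 * always of type RDMA_AH_ATTR_TYPE_ROCE.
 */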
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src)
{
	dst->type = RDMA_AH_ATTR_TYPE_ROCE;
	pvrdma_global_route_to_ib(rdma_ah_retrieve_grh(dst), &src->grh);
	rdma_ah_set_dlid(dst, src->dlid);
	rdma_ah_set_sl(dst, src->sl);
	rdma_ah_set_path_bits(dst, src->src_path_bits);
	rdma_ah_set_static_rate(dst, src->static_rate);
	rdma_ah_set_ah_flags(dst, src->ah_flags);
	rdma_ah_set_port_num(dst, src->port_num);
	memcpy(dst->roce.dmac, &src->dmac, ETH_ALEN);
}

void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src)
{
	ib_global_route_to_pvrdma(&dst->grh, rdma_ah_read_grh(src));
	dst->dlid = rdma_ah_get_dlid(src);
	dst->sl = rdma_ah_get_sl(src);
	dst->src_path_bits = rdma_ah_get_path_bits(src);
	dst->static_rate = rdma_ah_get_static_rate(src);
	dst->ah_flags = rdma_ah_get_ah_flags(src);
	dst->port_num = rdma_ah_get_port_num(src);
	memcpy(&dst->dmac, src->roce.dmac, sizeof(dst->dmac));
}

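/* Map the IB core GID type onto the PVRDMA RoCE v1/v2 type flag. */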
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type)
{
	return (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
		PVRDMA_GID_TYPE_FLAG_ROCE_V2 :
		PVRDMA_GID_TYPE_FLAG_ROCE_V1;
}