#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

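/*
 * The MR/MW key handed out to verbs consumers is the MTPT index rotated
 * left by 8 bits; key_to_hw_index() performs the inverse rotation to
 * recover the index.
 */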
static u32 hw_index_to_key(int ind)
{
	return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int err;
	int id;

	/* Allocate a key for mr from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
		return -ENOMEM;
	}

	mr->key = hw_index_to_key(id);

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
				 (unsigned long)id);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	ida_free(&mtpt_ida->ida, id);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}

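/*
 * Build the PBL (page buffer list) MTR that translates an MR's buffer.
 * FRMRs get a single-hop, MTT-only table sized in system pages and are
 * populated later by hns_roce_map_mr_sg(); other MRs use the PBL page
 * sizes from the device caps and map the buffer at @start immediately.
 */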
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata, u64 start)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = mr->size;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.user_access = mr->access;
	/* FRMR only needs the MTT; its pages are filled in by map_mr_sg() */
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
	else
		mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

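/*
 * Program the MPT (memory protection table) entry for an MR: the entry is
 * written into a command mailbox and committed to hardware with
 * CREATE_MPT, after which the MR is marked enabled.
 */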
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;

	ida_init(&mtpt_ida->ida);
	mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
	mtpt_ida->min = hr_dev->caps.reserved_mrws;
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = acc;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;
err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

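/*
 * Register a user memory region: allocate an MPT index and key, pin the
 * user buffer and build its PBL, then enable the MPT entry in hardware.
 */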
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->iova = virt_addr;
	mr->size = length;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = access_flags;
	mr->type = MR_TYPE_MR;

	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, udata, start);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

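/*
 * Re-register a user MR in place: the current MPT entry is queried into a
 * mailbox and destroyed, the requested PD/access/translation fields are
 * updated (rebuilding the PBL for IB_MR_REREG_TRANS), and the modified
 * entry is written back and re-created in hardware.
 */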
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	int ret;

	if (!mr->enabled)
		return ERR_PTR(-EINVAL);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return ERR_CAST(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
				mtpt_idx);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
				      mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;
	mr->iova = virt_addr;
	mr->size = length;

	if (flags & IB_MR_REREG_PD)
		mr->pd = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		free_mr_pbl(hr_dev, mr);
		ret = alloc_mr_pbl(hr_dev, mr, udata, start);
		if (ret) {
			ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
				  ret);
			goto free_cmd_mbox;
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
	if (ret) {
		ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	if (ret)
		return ERR_PTR(ret);
	return NULL;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (hr_dev->hw->dereg_mr)
		hr_dev->hw->dereg_mr(hr_dev);

	hns_roce_mr_free(hr_dev, mr);
	kfree(mr);

	return 0;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->size = max_num_sg * (1 << PAGE_SHIFT);

	/* Allocate a key for mr from mr_table */
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = mr->size;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

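/*
 * ib_sg_to_pages() callback: store one page address of the fast-register
 * MR into the temporary page list, failing once the PBL capacity
 * (hem_cfg.buf_pg_count) is exceeded.
 */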
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

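/*
 * Collect the DMA addresses of a fast-register MR's scatterlist into a
 * temporary page list and write them into the PBL MTR; returns the number
 * of pages mapped on success, and 0 or a negative errno on failure.
 */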
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	ida_free(&hr_dev->mr_table.mtpt_ida.ida,
		 (int)key_to_hw_index(mw->rkey));
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt failed (%d)\n", ret);
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	int ret;
	int id;

	/* Allocate a key for mw from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
		return -ENOMEM;
	}

	mw->rkey = hw_index_to_key(id);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

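/*
 * Copy up to @max_count page addresses of one buffer region into the MTT
 * entries located by hns_roce_hem_list_find_mtt(); returns the number of
 * pages written, or -ENOBUFS if no MTT space could be found.
 */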
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_region *region, dma_addr_t *pages,
			  int max_count)
{
	int count, npage;
	int offset, end;
	__le64 *mtts;
	u64 addr;
	int i;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end && npage < max_count) {
		count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count && npage < max_count; i++) {
			addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return npage;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* The mtr has only one root base address: when hopnum is 0, the root
	 * base address equals the first buffer address, so all allocated
	 * memory must lie in one continuous space accessed in direct mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

/* Check whether the given pages form one physically contiguous range:
 * return 0 if they do, otherwise the index of the first page that
 * breaks the contiguity.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		mtr->kmem = NULL;
	}
}

static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	size_t total_size;

	total_size = mtr_bufs_size(buf_attr);

	if (udata) {
		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
	} else {
		mtr->umem = NULL;
		mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
					       buf_attr->page_shift,
					       mtr->hem_cfg.is_direct ?
					       HNS_ROCE_BUF_DIRECT : 0);
		if (IS_ERR(mtr->kmem)) {
			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
				  PTR_ERR(mtr->kmem));
			return PTR_ERR(mtr->kmem);
		}
	}

	return 0;
}

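/*
 * Gather the DMA addresses of the MTR's backing buffer (user umem or
 * kernel buffer) into a temporary array, verify contiguity when direct
 * addressing is used, and write them into the MTT.
 */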
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			int page_count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t *pages;
	int npage;
	int ret;

	/* alloc a tmp array to store the buffer's dma addresses */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
					       mtr->kmem, page_shift);

	if (npage != page_count) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
			  page_count);
		ret = -ENOBUFS;
		goto err_alloc_list;
	}

	if (mtr->hem_cfg.is_direct && npage > 1) {
		ret = mtr_check_direct_pages(pages, npage, page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
				  mtr->umem ? "umtr" : "kmtr", ret, npage);
			ret = -ENOBUFS;
			goto err_alloc_list;
		}
	}

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);

err_alloc_list:
	kvfree(pages);

	return ret;
}

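/*
 * Write an array of page addresses into the MTR. In direct mode only the
 * root base address is recorded; otherwise each region with a non-zero
 * hop number is mapped through mtr_map_region().
 */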
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	unsigned int i, mapped_cnt;
	int ret = 0;

	/*
	 * Only use the first page address as root ba when hopnum is 0, this
	 * is because the addresses of all pages are consecutive in this case.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
	     mapped_cnt < page_cnt; i++) {
		r = &mtr->hem_cfg.region[i];
		/* if hopnum is 0, no MTT entries are needed for this region */
		if (!r->hopnum) {
			mapped_cnt += r->count;
			continue;
		}

		if (r->offset + r->count > page_cnt) {
			ret = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u count %u + %u > %u.\n",
				  i, r->offset, r->count, page_cnt);
			return ret;
		}

		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
				     page_cnt - mapped_cnt);
		if (ret < 0) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, ret);
			return ret;
		}
		mapped_cnt += ret;
		ret = 0;
	}

	if (mapped_cnt < page_cnt) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
			  mapped_cnt, page_cnt);
	}

	return ret;
}

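/*
 * Read back up to @mtt_max translation entries starting at buffer offset
 * @offset into @mtt_buf and optionally return the root base address;
 * returns the number of entries copied.
 */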
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int mtt_count, left;
	u32 start_index;
	int total = 0;
	__le64 *mtts;
	u32 npage;
	u64 addr;

	if (!mtt_buf || mtt_max < 1)
		goto done;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count, NULL);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

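/*
 * Derive the HEM page configuration from the buffer attributes: pick
 * direct (0-hop) addressing when no region needs an MTT, compute the
 * buffer page shift and count, and split each region into hardware pages.
 * Returns the total page count and stores the chosen page shift in
 * *buf_page_shift.
 */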
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift, u64 unaligned_size)
{
	struct hns_roce_buf_region *r;
	u64 first_region_padding;
	int page_cnt, region_cnt;
	unsigned int page_shift;
	size_t buf_size;

	/* If mtt is disabled, all pages must be within a continuous range */
	cfg->is_direct = !mtr_has_mtt(attr);
	buf_size = mtr_bufs_size(attr);
	if (cfg->is_direct) {
		/* When HEM buffer uses 0-level addressing, the page size is
		 * equal to the whole buffer size. The buffer is split into
		 * small pages only to check whether adjacent units lie in a
		 * continuous space, and the unit size is fixed to 4K as the
		 * ROCEE requires.
		 */
		page_shift = HNS_HW_PAGE_SHIFT;

		/* The ROCEE requires the page size to be 4K * 2 ^ N */
		cfg->buf_pg_count = 1;
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
		first_region_padding = 0;
	} else {
		page_shift = attr->page_shift;
		cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
						 1 << page_shift);
		cfg->buf_pg_shift = page_shift;
		first_region_padding = unaligned_size;
	}

	/* Convert each region's size to a page count; the unaligned head of
	 * the buffer is counted against the first region only.
	 */
	for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size +
					    first_region_padding);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		first_region_padding = 0;
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}

static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 unsigned int ba_page_shift)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int ret;

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret)
			return ret;
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	return 0;
}

static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	int buf_page_cnt;
	int ret;

	buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
					&buf_page_shift,
					udata ? user_addr & ~PAGE_MASK : 0);
	if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
			  buf_page_cnt, buf_page_shift);
		return -EINVAL;
	}

	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
		return ret;
	}

	/* The caller has its own buffer list and invokes hns_roce_mtr_map()
	 * later to finish the MTT configuration.
	 */
	if (buf_attr->mtt_only) {
		mtr->umem = NULL;
		mtr->kmem = NULL;
		return 0;
	}

	ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
		goto err_alloc_mtt;
	}

	/* Write buffer's dma address to MTT */
	ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
	else
		return 0;

	mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
	mtr_free_mtt(hr_dev, mtr);
	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}