#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

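/*
 * CQN allocation is banked: the low CQ_BANKID_SHIFT bits of a CQN select one
 * of HNS_ROCE_CQ_BANK_NUM banks, and the remaining bits come from that bank's
 * IDA. New CQs are placed in the least-loaded bank to balance load across
 * the banks.
 */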
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower CQ_BANKID_SHIFT bits of the CQN encode the bank id */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of the CQN select the bank */
	return (u8)(cqn & GENMASK(1, 0));
}

static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}

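/*
 * Fill the CQ context (CQC) into a command mailbox and post a CREATE_CQC
 * command so the hardware installs the context for this CQN.
 */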
static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq,
			       u64 *mtts, dma_addr_t dma_handle)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
		return PTR_ERR(mailbox);
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
				     hr_cq->cqn);
	if (ret)
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

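/*
 * Software/hardware bookkeeping for a new CQ context:
 *  1. resolve the CQE buffer addresses from its MTR,
 *  2. reference the CQC entry in the HEM table for this CQN,
 *  3. publish the CQ in the xarray used by the event handlers,
 *  4. ask the hardware to create the CQC.
 * Failures are unwound in reverse order.
 */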
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 mtts[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle;
	int ret;

	/* hns_roce_mtr_find() returns the number of mtt entries found, so
	 * zero means the CQE buffer addresses could not be resolved.
	 */
	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
				&dma_handle);
	if (!ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return -EINVAL;
	}

	/* Get the CQC entry from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		return ret;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}

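/*
 * Destroy the hardware CQ context, then wait for outstanding interrupt
 * handling and event references to finish before releasing the HEM entry.
 */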
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait until any interrupt handling in progress on this CQ's EQ has
	 * finished, so no handler can still observe the CQ.
	 */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Drop the initial reference and wait for all event references to go */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

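/* Allocate the CQE buffer and its MTR address translation (kernel or user). */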
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

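/*
 * Set up the CQ doorbell: user CQs with record-doorbell support get the
 * user-provided doorbell page mapped, kernel CQs get a software doorbell
 * record plus the device doorbell register address.
 */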
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
				 const struct ib_cq_init_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  attr->cqe, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
			  attr->comp_vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	return 0;
}

static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
		       struct hns_roce_ib_create_cq *ucmd)
{
	struct ib_device *ibdev = hr_cq->ib_cq.device;
	int ret;

	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
	if (ret) {
		ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;

	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);
}

static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	if (!udata) {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
		return 0;
	}

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid cqe size %u.\n", ucmd->cqe_size);
			return -EINVAL;
		}

		hr_cq->cqe_size = ucmd->cqe_size;
	} else {
		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	}

	return 0;
}

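/*
 * Verb entry point for CQ creation: validate the attributes, size the queue,
 * then allocate the CQE buffer, doorbell, CQN and hardware context in that
 * order. The error labels release the resources in reverse order.
 */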
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int ret;

	if (attr->flags)
		return -EOPNOTSUPP;

	ret = verify_cq_create_attr(hr_dev, attr);
	if (ret)
		return ret;

	if (udata) {
		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
		if (ret)
			return ret;
	}

	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

	ret = set_cqe_size(hr_cq, udata, &ucmd);
	if (ret)
		return ret;

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	refcount_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
	return ret;
}

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

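/*
 * Completion event dispatch, called from EQ handling: bump the arm sequence
 * number and invoke the consumer's completion handler, if any.
 */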
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

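/*
 * Asynchronous (error) event dispatch, called from EQ handling: a temporary
 * reference keeps the CQ alive while the consumer's event handler runs.
 */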
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	refcount_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

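/*
 * Initialize the CQ table: reserved CQNs occupy the lowest IDs of their
 * banks (tracked via each bank's 'min' and 'inuse'), and each bank covers
 * an equal share of the CQN space.
 */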
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
}