/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static void flush_work_handle(struct work_struct *work)
{
        struct hns_roce_work *flush_work = container_of(work,
                                        struct hns_roce_work, work);
        struct hns_roce_qp *hr_qp = container_of(flush_work,
                                        struct hns_roce_qp, flush_work);
        struct device *dev = flush_work->hr_dev->dev;
        struct ib_qp_attr attr;
        int attr_mask;
        int ret;

        attr_mask = IB_QP_STATE;
        attr.qp_state = IB_QPS_ERR;

        if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
                ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
                if (ret)
                        dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
                                ret);
        }

        /*
         * make sure we signal QP destroy leg that flush QP was completed
         * so that it can safely proceed ahead now and destroy QP
         */
        if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_work *flush_work = &hr_qp->flush_work;

        flush_work->hr_dev = hr_dev;
        INIT_WORK(&flush_work->work, flush_work_handle);
        refcount_inc(&hr_qp->refcount);
        queue_work(hr_dev->irq_workq, &flush_work->work);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
        /*
         * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
         * gets into errored mode. Hence, as a workaround to this
         * hardware limitation, driver needs to assist in flushing. But
         * the flushing operation uses mailbox to convey the QP state to
         * the hardware and which can sleep due to the mutex protection
         * around the mailbox calls. Hence, use the deferred flush for
         * now.
         */
        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                init_flush_work(dev, qp);
}

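/* Dispatch an async event for a QP: look the QP up in the QPN xarray while
 * holding a reference, move it to the error state and kick off a CQE flush
 * for fatal/access errors, then forward the event to the QP owner.
 */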
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                refcount_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
            event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
            event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
                qp->state = IB_QPS_ERR;

                flush_cqe(hr_dev, qp);
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (refcount_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_qp *ibqp = &hr_qp->ibqp;
        struct ib_event event;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
                case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

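/* QPNs are spread over HNS_ROCE_QP_BANK_NUM banks; pick the bank that
 * currently has the fewest QPNs in use so allocations stay balanced.
 */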
static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
{
        u32 least_load = bank[0].inuse;
        u8 bankid = 0;
        u32 bankcnt;
        u8 i;

        for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
                bankcnt = bank[i].inuse;
                if (bankcnt < least_load) {
                        least_load = bankcnt;
                        bankid = i;
                }
        }

        return bankid;
}

static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
                                 unsigned long *qpn)
{
        int id;

        id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
        if (id < 0) {
                id = ida_alloc_range(&bank->ida, bank->min, bank->max,
                                     GFP_KERNEL);
                if (id < 0)
                        return id;
        }

        /* the QPN should keep increasing until max value is reached. */
        bank->next = (id + 1) > bank->max ? bank->min : id + 1;

        /* the lower 3 bits is bankid */
        *qpn = (id << 3) | bankid;

        return 0;
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long num = 0;
        u8 bankid;
        int ret;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                num = 1;
                hr_qp->doorbell_qpn = 1;
        } else {
                mutex_lock(&qp_table->bank_mutex);
                bankid = get_least_load_bankid_for_qp(qp_table->bank);

                ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
                                            &num);
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to alloc QPN, ret = %d\n", ret);
                        mutex_unlock(&qp_table->bank_mutex);
                        return ret;
                }

                qp_table->bank[bankid].inuse++;
                mutex_unlock(&qp_table->bank_mutex);

                hr_qp->doorbell_qpn = (u32)num;
        }

        hr_qp->qpn = num;

        return 0;
}

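/* Track the QP on the per-device list and on the SQ/RQ lists of its CQs so
 * software can generate flush CQEs for it; both the device list lock and the
 * CQ locks are held while linking.
 */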
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
                           struct hns_roce_qp *hr_qp,
                           struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
        struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
        unsigned long flags;

        hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
        hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

        list_add_tail(&hr_qp->node, &hr_dev->qp_list);
        if (hr_send_cq)
                list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
        if (hr_recv_cq)
                list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

        hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

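/* Publish the QP in the QPN->QP xarray (used by the async event handler)
 * and, on success, add it to the device and CQ flush lists.
 */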
static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp,
                             struct ib_qp_init_attr *init_attr)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
        if (ret)
                dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
        else
                /* add QP to device's QP list for softwc */
                add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
                               init_attr->recv_cq);

        return ret;
}

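/* Reserve the per-QP context entries in the HEM tables: QPC and IRRL always,
 * TRRL and SCC context only when the hardware supports them.
 */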
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get QPC table\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get IRRL table\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get TRRL table\n");
                        goto err_put_irrl;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                /* Alloc memory for SCC CTX */
                ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get SCC CTX table\n");
                        goto err_put_trrl;
                }
        }

        return 0;

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
        rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;

        list_del(&hr_qp->node);

        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
                list_del(&hr_qp->sq_node);

        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
            hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
                list_del(&hr_qp->rq_node);

        xa_lock_irqsave(xa, flags);
        __xa_erase(xa, hr_qp->qpn);
        xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
        /* The lower 3 bits of QPN are used to hash to different banks */
        return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        u8 bankid;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
                return;

        if (hr_qp->qpn < hr_dev->caps.reserved_qps)
                return;

        bankid = get_qp_bankid(hr_qp->qpn);

        ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);

        mutex_lock(&hr_dev->qp_table.bank_mutex);
        hr_dev->qp_table.bank[bankid].inuse--;
        mutex_unlock(&hr_dev->qp_table.bank_mutex);
}

static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
                       bool user)
{
        u32 max_sge = dev->caps.max_rq_sg;

        if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                return max_sge;

        /* Reserve one RQ SGE for the driver on HIP08: kernel mode records
         * it in rsv_sge so it is added on top of the caller's request,
         * while user mode enlarges the reported limit so the userspace
         * driver can reserve the extra SGE itself.
         */
        if (user)
                max_sge = roundup_pow_of_two(max_sge + 1);
        else
                hr_qp->rq.rsv_sge = 1;

        return max_sge;
}

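/* Size the receive queue: clamp against device limits, round the WQE count
 * up to a power of two, derive the WQE shift from the maximum descriptor
 * size, and report the resulting capabilities back through @cap.
 */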
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                       struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
        u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
        u32 cnt;

        /* If srq exist, set zero for relative number of rq */
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
                hr_qp->rq_inl_buf.wqe_cnt = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;

                return 0;
        }

        /* Check the validity of QP support capacity */
        if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > max_sge) {
                ibdev_err(&hr_dev->ib_dev,
                          "RQ config error, depth = %u, sge = %u\n",
                          cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
                          cap->max_recv_wr);
                return -EINVAL;
        }

        hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
                                              hr_qp->rq.rsv_sge);

        hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
                                    hr_qp->rq.max_gs);

        hr_qp->rq.wqe_cnt = cnt;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
            hr_qp->ibqp.qp_type != IB_QPT_UD &&
            hr_qp->ibqp.qp_type != IB_QPT_GSI)
                hr_qp->rq_inl_buf.wqe_cnt = cnt;
        else
                hr_qp->rq_inl_buf.wqe_cnt = 0;

        cap->max_recv_wr = cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;

        return 0;
}

static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
{
        /* GSI/UD QP only has extended sge */
        if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
                return qp->sq.max_gs;

        if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
                return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;

        return 0;
}

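/* Work out how much extended SGE space the SQ needs: UD/GSI WQEs keep all
 * their SGEs in the extended space, other WQEs only overflow there beyond
 * HNS_ROCE_SGE_IN_WQE, and the total is rounded up to a power of two.
 */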
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
                              struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
        u32 total_sge_cnt;
        u32 wqe_sge_cnt;

        hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

        hr_qp->sq.max_gs = max(1U, cap->max_send_sge);

        wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);

        /* If the number of extended sge is not zero, they MUST use the
         * space of HNS_HW_PAGE_SIZE at least.
         */
        if (wqe_sge_cnt) {
                total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
                hr_qp->sge.sge_cnt = max(total_sge_cnt,
                                (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
        }
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
                                        struct ib_qp_cap *cap,
                                        struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if (ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
                          cap->max_send_sge);
                return -EINVAL;
        }

        return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
                            struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
                            struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt = 0;
        int ret;

        if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
            cnt > hr_dev->caps.max_wqes)
                return -EINVAL;

        ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
        if (ret) {
                ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
        hr_qp->sq.wqe_cnt = cnt;

        return 0;
}

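/* Describe the WQE buffer layout for the MTR: the SQ WQEs, the extended SGE
 * space and the RQ WQEs are placed back to back in one buffer, each as its
 * own region with the hop number taken from the device capabilities.
 */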
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp,
                            struct hns_roce_buf_attr *buf_attr)
{
        int buf_size;
        int idx = 0;

        hr_qp->buff_size = 0;

        /* SQ WQE */
        hr_qp->sq.offset = 0;
        buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
                                          hr_qp->sq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* extend SGE WQE in SQ */
        hr_qp->sge.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
                                          hr_qp->sge.sge_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* RQ WQE */
        hr_qp->rq.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
                                          hr_qp->rq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        if (hr_qp->buff_size < 1)
                return -EINVAL;

        buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
        buf_attr->region_count = idx;

        return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                              struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt;

        if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
                          cnt);
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq.wqe_cnt = cnt;

        set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

        /* sync the parameters of kernel QP to user's configuration */
        cap->max_send_wr = cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;

        return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
                               struct ib_qp_init_attr *init_attr)
{
        u32 max_recv_sge = init_attr->cap.max_recv_sge;
        u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
        struct hns_roce_rinl_wqe *wqe_list;
        int i;

        /* allocate recv inline buf */
        wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
                           GFP_KERNEL);
        if (!wqe_list)
                goto err;

        /* Allocate a continuous buffer for all inline sge we need */
        wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
                                      sizeof(struct hns_roce_rinl_sge)),
                                      GFP_KERNEL);
        if (!wqe_list[0].sg_list)
                goto err_wqe_list;

        /* Assign buffers of sg_list to each inline wqe */
        for (i = 1; i < wqe_cnt; i++)
                wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

        hr_qp->rq_inl_buf.wqe_list = wqe_list;

        return 0;

err_wqe_list:
        kfree(wqe_list);

err:
        return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
        if (hr_qp->rq_inl_buf.wqe_list)
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
        kfree(hr_qp->rq_inl_buf.wqe_list);
}

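/* Allocate the WQE buffer for the QP: kernel QPs that use RQ inline also get
 * the inline receive buffer, then the multi-region MTR is created either over
 * the user memory at @addr or over newly allocated kernel pages.
 */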
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata, unsigned long addr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_attr buf_attr = {};
        int ret;

        if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
                ret = alloc_rq_inline_buf(hr_qp, init_attr);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to alloc inline buf, ret = %d.\n",
                                  ret);
                        return ret;
                }
        } else {
                hr_qp->rq_inl_buf.wqe_list = NULL;
        }

        ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
                goto err_inline;
        }
        ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
                                  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
                                  udata, addr);
        if (ret) {
                ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
                goto err_inline;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

        return 0;

err_inline:
        free_rq_inline_buf(hr_qp);

        return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
        free_rq_inline_buf(hr_qp);
}

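/* Record doorbells are only usable when the device supports them and the
 * user's command/response buffers are large enough to carry the doorbell
 * address and capability flags; the helpers below encode those checks.
 */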
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp,
                                   struct hns_roce_ib_create_qp *ucmd)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_sq(init_attr) &&
                udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_init_attr *init_attr)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
                         struct hns_roce_dev *hr_dev,
                         struct ib_udata *udata,
                         struct hns_roce_ib_create_qp_resp *resp)
{
        struct hns_roce_ucontext *uctx =
                rdma_udata_to_drv_context(udata,
                        struct hns_roce_ucontext, ibucontext);
        struct rdma_user_mmap_entry *rdma_entry;
        u64 address;

        address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

        hr_qp->dwqe_mmap_entry =
                hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
                                                HNS_ROCE_DWQE_SIZE,
                                                HNS_ROCE_MMAP_TYPE_DWQE);

        if (!hr_qp->dwqe_mmap_entry) {
                ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
                return -ENOMEM;
        }

        rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
        resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

        return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata,
                            struct hns_roce_ib_create_qp *ucmd,
                            struct hns_roce_ib_create_qp_resp *resp)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
                struct hns_roce_ucontext, ibucontext);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
                ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to map user SQ doorbell, ret = %d.\n",
                                  ret);
                        goto err_out;
                }
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
        }

        if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
                ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to map user RQ doorbell, ret = %d.\n",
                                  ret);
                        goto err_sdb;
                }
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
        }

        return 0;

err_sdb:
        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
        return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_init_attr *init_attr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                hr_qp->sq.db_reg = hr_dev->mem_base +
                                   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
        else
                hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
                                   DB_REG_OFFSET * hr_dev->priv_uar.index;

        hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
                           DB_REG_OFFSET * hr_dev->priv_uar.index;

        if (kernel_qp_has_rdb(hr_dev, init_attr)) {
                ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to alloc kernel RQ doorbell, ret = %d.\n",
                                  ret);
                        return ret;
                }
                *hr_qp->rdb.db_record = 0;
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
        }

        return 0;
}

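/* Set up the doorbells for a new QP: user QPs may get a direct WQE mmap entry
 * plus mapped SQ/RQ record doorbells, while kernel QPs get their doorbell
 * registers and, when supported, an RQ record doorbell.
 */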
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata,
                       struct hns_roce_ib_create_qp *ucmd,
                       struct hns_roce_ib_create_qp_resp *resp)
{
        int ret;

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

        if (udata) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
                        ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
                        if (ret)
                                return ret;
                }

                ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
                                       resp);
                if (ret)
                        goto err_remove_qp;
        } else {
                ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
                if (ret)
                        return ret;
        }

        return 0;

err_remove_qp:
        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
                qp_user_mmap_entry_remove(hr_qp);

        return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_udata *udata)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);

        if (udata) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
                        qp_user_mmap_entry_remove(hr_qp);
        } else {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_free_db(hr_dev, &hr_qp->rdb);
        }
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u64 *sq_wrid = NULL;
        u64 *rq_wrid = NULL;
        int ret;

        sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(sq_wrid)) {
                ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
                return -ENOMEM;
        }

        if (hr_qp->rq.wqe_cnt) {
                rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
                if (ZERO_OR_NULL_PTR(rq_wrid)) {
                        ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
                        ret = -ENOMEM;
                        goto err_sq;
                }
        }

        hr_qp->sq.wrid = sq_wrid;
        hr_qp->rq.wrid = rq_wrid;
        return 0;
err_sq:
        kfree(sq_wrid);

        return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
        kfree(hr_qp->rq.wrid);
        kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata,
                        struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
                init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

        hr_qp->max_inline_data = init_attr->cap.max_inline_data;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
                          hns_roce_qp_has_rq(init_attr), !!udata);
        if (ret) {
                ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        if (udata) {
                ret = ib_copy_from_udata(ucmd, udata,
                                         min(udata->inlen, sizeof(*ucmd)));
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to copy QP ucmd, ret = %d\n", ret);
                        return ret;
                }

                ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
                if (ret)
                        ibdev_err(ibdev,
                                  "failed to set user SQ size, ret = %d.\n",
                                  ret);
        } else {
                ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
                if (ret)
                        ibdev_err(ibdev,
                                  "failed to set kernel SQ size, ret = %d.\n",
                                  ret);
        }

        return ret;
}

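/* Common QP creation path for user and kernel QPs: validate and size the
 * queues, allocate WRID arrays (kernel only), the WQE buffer, a QPN,
 * doorbells and QP context, publish the QP, and finally report the enabled
 * capabilities back to userspace.
 */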
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata,
                                     struct hns_roce_qp *hr_qp)
{
        struct hns_roce_ib_create_qp_resp resp = {};
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_ib_create_qp ucmd;
        int ret;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;
        hr_qp->flush_flag = 0;

        if (init_attr->create_flags)
                return -EOPNOTSUPP;

        ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
        if (ret) {
                ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
                return ret;
        }

        if (!udata) {
                ret = alloc_kernel_wrid(hr_dev, hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
                                  ret);
                        return ret;
                }
        }

        ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
                goto err_buf;
        }

        ret = alloc_qpn(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
                goto err_qpn;
        }

        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
                          ret);
                goto err_db;
        }

        ret = alloc_qpc(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
                          ret);
                goto err_qpc;
        }

        ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
                goto err_store;
        }

        if (udata) {
                resp.cap_flags = hr_qp->en_flags;
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
                        ibdev_err(ibdev, "failed to copy QP resp, ret = %d.\n",
                                  ret);
                        goto err_store;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                if (ret)
                        goto err_flow_ctrl;
        }

        hr_qp->ibqp.qp_num = hr_qp->qpn;
        hr_qp->event = hns_roce_ib_qp_event;
        refcount_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_flow_ctrl:
        hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
        free_qpc(hr_dev, hr_qp);
err_qpc:
        free_qp_db(hr_dev, hr_qp, udata);
err_db:
        free_qpn(hr_dev, hr_qp);
err_qpn:
        free_qp_buf(hr_dev, hr_qp);
err_buf:
        free_kernel_wrid(hr_qp);
        return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         struct ib_udata *udata)
{
        if (refcount_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        free_qpc(hr_dev, hr_qp);
        free_qpn(hr_dev, hr_qp);
        free_qp_buf(hr_dev, hr_qp);
        free_kernel_wrid(hr_qp);
        free_qp_db(hr_dev, hr_qp, udata);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
                         bool is_user)
{
        switch (type) {
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:
                if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
                        goto out;
                break;
        case IB_QPT_UD:
                if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
                    is_user)
                        goto out;
                break;
        case IB_QPT_RC:
        case IB_QPT_GSI:
                break;
        default:
                goto out;
        }

        return 0;

out:
        ibdev_err(&hr_dev->ib_dev, "QP type %d is not supported\n", type);

        return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata)
{
        struct ib_device *ibdev = qp->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
        struct hns_roce_qp *hr_qp = to_hr_qp(qp);
        struct ib_pd *pd = qp->pd;
        int ret;

        ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
        if (ret)
                return ret;

        if (init_attr->qp_type == IB_QPT_XRC_TGT)
                hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

        if (init_attr->qp_type == IB_QPT_GSI) {
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
        }

        ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
        if (ret)
                ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
                          init_attr->qp_type, ret);

        return ret;
}

int to_hr_qp_type(int qp_type)
{
        switch (qp_type) {
        case IB_QPT_RC:
                return SERV_TYPE_RC;
        case IB_QPT_UD:
        case IB_QPT_GSI:
                return SERV_TYPE_UD;
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:
                return SERV_TYPE_XRC;
        default:
                return -1;
        }
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_attr *attr, int attr_mask)
{
        enum ib_mtu active_mtu;
        int p;

        p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
        active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

        if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
             attr->path_mtu > hr_dev->caps.max_mtu) ||
            attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr, path_mtu = %d.\n",
                          attr->path_mtu);
                return -EINVAL;
        }

        return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int p;

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
                          attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "invalid attr, pkey_index = %u.\n",
                                  attr->pkey_index);
                        return -EINVAL;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr, max_rd_atomic = %u.\n",
                          attr->max_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid attr, max_dest_rd_atomic = %u.\n",
                          attr->max_dest_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PATH_MTU)
                return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

        return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int ret = -EINVAL;

        mutex_lock(&hr_qp->mutex);

        if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
                goto out;

        cur_state = hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

                        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
                        ibdev_warn(&hr_dev->ib_dev,
                                   "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
                ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
        if (ret)
                goto out;

        if (cur_state == new_state && cur_state == IB_QPS_RESET)
                goto out;

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

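/* Lock the send and receive CQs of a QP together, always in ascending CQN
 * order so that two paths sharing CQs cannot deadlock; the __acquire and
 * __release annotations keep sparse happy when one CQ is shared or absent.
 */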
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                spin_unlock(&send_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                __release(&send_cq->lock);
                spin_unlock(&recv_cq->lock);
        } else if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
        return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
        return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

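/* Check whether posting nreq more WQEs would overflow the work queue; the
 * head/tail distance is re-read under the CQ lock before overflow is
 * declared, so a concurrent completion is taken into account.
 */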
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->wqe_cnt))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->wqe_cnt;
}

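/* Initialize the QP table: reserved QPNs at the bottom of the range are
 * marked as in use in their banks (raising each bank's minimum), and every
 * bank gets its own IDA over an equal share of the QPN space.
 */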
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned int reserved_from_bot;
        unsigned int i;

        qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
                                                sizeof(u32), GFP_KERNEL);
        if (!qp_table->idx_table.spare_idx)
                return -ENOMEM;

        mutex_init(&qp_table->scc_mutex);
        mutex_init(&qp_table->bank_mutex);
        xa_init(&hr_dev->qp_table_xa);

        reserved_from_bot = hr_dev->caps.reserved_qps;

        for (i = 0; i < reserved_from_bot; i++) {
                hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
                hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
        }

        for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
                ida_init(&hr_dev->qp_table.bank[i].ida);
                hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
                                               HNS_ROCE_QP_BANK_NUM - 1;
                hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
        }

        return 0;
}

1469
1470 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
1471 {
1472 int i;
1473
1474 for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
1475 ida_destroy(&hr_dev->qp_table.bank[i].ida);
1476 kfree(hr_dev->qp_table.idx_table.spare_idx);
1477 }