0033 #include <linux/acpi.h>
0034 #include <linux/etherdevice.h>
0035 #include <linux/interrupt.h>
0036 #include <linux/iopoll.h>
0037 #include <linux/kernel.h>
0038 #include <linux/types.h>
0039 #include <net/addrconf.h>
0040 #include <rdma/ib_addr.h>
0041 #include <rdma/ib_cache.h>
0042 #include <rdma/ib_umem.h>
0043 #include <rdma/uverbs_ioctl.h>
0044
0045 #include "hnae3.h"
0046 #include "hns_roce_common.h"
0047 #include "hns_roce_device.h"
0048 #include "hns_roce_cmd.h"
0049 #include "hns_roce_hem.h"
0050 #include "hns_roce_hw_v2.h"
0051
0052 enum {
0053 CMD_RST_PRC_OTHERS,
0054 CMD_RST_PRC_SUCCESS,
0055 CMD_RST_PRC_EBUSY,
0056 };
0057
0058 enum ecc_resource_type {
0059 ECC_RESOURCE_QPC,
0060 ECC_RESOURCE_CQC,
0061 ECC_RESOURCE_MPT,
0062 ECC_RESOURCE_SRQC,
0063 ECC_RESOURCE_GMV,
0064 ECC_RESOURCE_QPC_TIMER,
0065 ECC_RESOURCE_CQC_TIMER,
0066 ECC_RESOURCE_SCCC,
0067 ECC_RESOURCE_COUNT,
0068 };
0069
0070 static const struct {
0071 const char *name;
0072 u8 read_bt0_op;
0073 u8 write_bt0_op;
0074 } fmea_ram_res[] = {
0075 { "ECC_RESOURCE_QPC",
0076 HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
0077 { "ECC_RESOURCE_CQC",
0078 HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
0079 { "ECC_RESOURCE_MPT",
0080 HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
0081 { "ECC_RESOURCE_SRQC",
0082 HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
0083
0084 { "ECC_RESOURCE_GMV",
0085 0, 0 },
0086 { "ECC_RESOURCE_QPC_TIMER",
0087 HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
0088 { "ECC_RESOURCE_CQC_TIMER",
0089 HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
0090 { "ECC_RESOURCE_SCCC",
0091 HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
0092 };
0093
0094 static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
0095 struct ib_sge *sg)
0096 {
0097 dseg->lkey = cpu_to_le32(sg->lkey);
0098 dseg->addr = cpu_to_le64(sg->addr);
0099 dseg->len = cpu_to_le32(sg->length);
0100 }

/*
 * Map IB work request opcodes to hns hardware opcodes. Each entry stores the
 * hardware opcode plus one so that a zero entry means "no mapping";
 * to_hr_opcode() subtracts the one again, or returns the opcode mask for
 * unmapped values.
 */
0109 #define HR_OPC_MAP(ib_key, hr_key) \
0110 [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
0111
0112 static const u32 hns_roce_op_code[] = {
0113 HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
0114 HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
0115 HR_OPC_MAP(SEND, SEND),
0116 HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
0117 HR_OPC_MAP(RDMA_READ, RDMA_READ),
0118 HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
0119 HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
0120 HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
0121 HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
0122 HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
0123 HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
0124 HR_OPC_MAP(REG_MR, FAST_REG_PMR),
0125 };
0126
0127 static u32 to_hr_opcode(u32 ib_opcode)
0128 {
0129 if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
0130 return HNS_ROCE_V2_WQE_OP_MASK;
0131
0132 return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
0133 HNS_ROCE_V2_WQE_OP_MASK;
0134 }
0135
0136 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
0137 const struct ib_reg_wr *wr)
0138 {
0139 struct hns_roce_wqe_frmr_seg *fseg =
0140 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
0141 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
0142 u64 pbl_ba;

	/* Set the bind/atomic/remote/local access bits from ib_access_flags. */
0145 hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
0146 hr_reg_write_bool(fseg, FRMR_ATOMIC,
0147 wr->access & IB_ACCESS_REMOTE_ATOMIC);
0148 hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
0149 hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
0150 hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

	/* msg_len and inv_key are reused to carry the PBL base address. */
0153 pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
0154 rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
0155 rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
0156
0157 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
0158 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
0159 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
0160 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
0161
0162 hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
0163 hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
0164 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
0165 hr_reg_clear(fseg, FRMR_BLK_MODE);
0166 }
0167
0168 static void set_atomic_seg(const struct ib_send_wr *wr,
0169 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
0170 unsigned int valid_num_sge)
0171 {
0172 struct hns_roce_v2_wqe_data_seg *dseg =
0173 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
0174 struct hns_roce_wqe_atomic_seg *aseg =
0175 (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
0176
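	/* Layout of an atomic WQE: the base RC send WQE is followed by one
	 * data segment (the local buffer that receives the original remote
	 * value) and then the atomic segment holding the operands.
	 */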
0177 set_data_seg_v2(dseg, wr->sg_list);
0178
0179 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
0180 aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
0181 aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
0182 } else {
0183 aseg->fetchadd_swap_data =
0184 cpu_to_le64(atomic_wr(wr)->compare_add);
0185 aseg->cmp_data = 0;
0186 }
0187
0188 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
0189 }
0190
0191 static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
0192 const struct ib_send_wr *wr,
0193 unsigned int *sge_idx, u32 msg_len)
0194 {
0195 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
0196 unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
0197 unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
0198 unsigned int left_len_in_pg;
0199 unsigned int idx = *sge_idx;
0200 unsigned int i = 0;
0201 unsigned int len;
0202 void *addr;
0203 void *dseg;
0204
	if (msg_len > ext_sge_sz) {
		ibdev_err(ibdev,
			  "not enough extended SGE space for inline data.\n");
		return -EINVAL;
	}
0210
0211 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
0212 left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
0213 len = wr->sg_list[0].length;
0214 addr = (void *)(unsigned long)(wr->sg_list[0].addr);
0215
	/* The inline data may cross a hardware page boundary in the extended
	 * SGE space, so each source buffer is copied in pieces: first the
	 * part that fits in the current page, then the remainder into the
	 * following page(s).
	 */
0221 while (1) {
0222 if (len <= left_len_in_pg) {
0223 memcpy(dseg, addr, len);
0224
0225 idx += len / dseg_len;
0226
0227 i++;
0228 if (i >= wr->num_sge)
0229 break;
0230
0231 left_len_in_pg -= len;
0232 len = wr->sg_list[i].length;
0233 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
0234 dseg += len;
0235 } else {
0236 memcpy(dseg, addr, left_len_in_pg);
0237
0238 len -= left_len_in_pg;
0239 addr += left_len_in_pg;
0240 idx += left_len_in_pg / dseg_len;
0241 dseg = hns_roce_get_extend_sge(qp,
0242 idx & (qp->sge.sge_cnt - 1));
0243 left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
0244 }
0245 }
0246
0247 *sge_idx = idx;
0248
0249 return 0;
0250 }
0251
0252 static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
0253 unsigned int *sge_ind, unsigned int cnt)
0254 {
0255 struct hns_roce_v2_wqe_data_seg *dseg;
0256 unsigned int idx = *sge_ind;
0257
0258 while (cnt > 0) {
0259 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
0260 if (likely(sge->length)) {
0261 set_data_seg_v2(dseg, sge);
0262 idx++;
0263 cnt--;
0264 }
0265 sge++;
0266 }
0267
0268 *sge_ind = idx;
0269 }
0270
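/* An inline payload must fit within both the QP's max inline size and the
 * path MTU.
 */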
0271 static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
0272 {
0273 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
0274 int mtu = ib_mtu_enum_to_int(qp->path_mtu);
0275
0276 if (len > qp->max_inline_data || len > mtu) {
0277 ibdev_err(&hr_dev->ib_dev,
0278 "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
0279 len, qp->max_inline_data, mtu);
0280 return false;
0281 }
0282
0283 return true;
0284 }
0285
0286 static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
0287 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
0288 unsigned int *sge_idx)
0289 {
0290 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
0291 u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
0292 struct ib_device *ibdev = &hr_dev->ib_dev;
0293 unsigned int curr_idx = *sge_idx;
0294 void *dseg = rc_sq_wqe;
0295 unsigned int i;
0296 int ret;
0297
0298 if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
0299 ibdev_err(ibdev, "invalid inline parameters!\n");
0300 return -EINVAL;
0301 }
0302
0303 if (!check_inl_data_len(qp, msg_len))
0304 return -EINVAL;
0305
0306 dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
0307
0308 if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
0309 hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
0310
0311 for (i = 0; i < wr->num_sge; i++) {
0312 memcpy(dseg, ((void *)wr->sg_list[i].addr),
0313 wr->sg_list[i].length);
0314 dseg += wr->sg_list[i].length;
0315 }
0316 } else {
0317 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
0318
0319 ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
0320 if (ret)
0321 return ret;
0322
0323 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
0324 }
0325
0326 *sge_idx = curr_idx;
0327
0328 return 0;
0329 }
0330
0331 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
0332 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
0333 unsigned int *sge_ind,
0334 unsigned int valid_num_sge)
0335 {
0336 struct hns_roce_v2_wqe_data_seg *dseg =
0337 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
0338 struct hns_roce_qp *qp = to_hr_qp(ibqp);
0339 int j = 0;
0340 int i;
0341
0342 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
0343 (*sge_ind) & (qp->sge.sge_cnt - 1));
0344
0345 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
0346 !!(wr->send_flags & IB_SEND_INLINE));
0347 if (wr->send_flags & IB_SEND_INLINE)
0348 return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
0349
0350 if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
0351 for (i = 0; i < wr->num_sge; i++) {
0352 if (likely(wr->sg_list[i].length)) {
0353 set_data_seg_v2(dseg, wr->sg_list + i);
0354 dseg++;
0355 }
0356 }
0357 } else {
0358 for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
0359 if (likely(wr->sg_list[i].length)) {
0360 set_data_seg_v2(dseg, wr->sg_list + i);
0361 dseg++;
0362 j++;
0363 }
0364 }
0365
0366 set_extend_sge(qp, wr->sg_list + i, sge_ind,
0367 valid_num_sge - HNS_ROCE_SGE_IN_WQE);
0368 }
0369
0370 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
0371
0372 return 0;
0373 }
0374
0375 static int check_send_valid(struct hns_roce_dev *hr_dev,
0376 struct hns_roce_qp *hr_qp)
0377 {
0378 struct ib_device *ibdev = &hr_dev->ib_dev;
0379 struct ib_qp *ibqp = &hr_qp->ibqp;
0380
0381 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
0382 ibqp->qp_type != IB_QPT_GSI &&
0383 ibqp->qp_type != IB_QPT_UD)) {
0384 ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
0385 ibqp->qp_type);
0386 return -EOPNOTSUPP;
0387 } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
0388 hr_qp->state == IB_QPS_INIT ||
0389 hr_qp->state == IB_QPS_RTR)) {
0390 ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
0391 hr_qp->state);
0392 return -EINVAL;
0393 } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
0394 ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
0395 hr_dev->state);
0396 return -EIO;
0397 }
0398
0399 return 0;
0400 }
0401
0402 static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
0403 unsigned int *sge_len)
0404 {
0405 unsigned int valid_num = 0;
0406 unsigned int len = 0;
0407 int i;
0408
0409 for (i = 0; i < wr->num_sge; i++) {
0410 if (likely(wr->sg_list[i].length)) {
0411 len += wr->sg_list[i].length;
0412 valid_num++;
0413 }
0414 }
0415
0416 *sge_len = len;
0417 return valid_num;
0418 }
0419
0420 static __le32 get_immtdata(const struct ib_send_wr *wr)
0421 {
0422 switch (wr->opcode) {
0423 case IB_WR_SEND_WITH_IMM:
0424 case IB_WR_RDMA_WRITE_WITH_IMM:
0425 return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
0426 default:
0427 return 0;
0428 }
0429 }
0430
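/* UD QPs only support the SEND and SEND_WITH_IMM opcodes. */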
0431 static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
0432 const struct ib_send_wr *wr)
0433 {
0434 u32 ib_op = wr->opcode;
0435
0436 if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
0437 return -EINVAL;
0438
0439 ud_sq_wqe->immtdata = get_immtdata(wr);
0440
0441 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
0442
0443 return 0;
0444 }
0445
0446 static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
0447 struct hns_roce_ah *ah)
0448 {
0449 struct ib_device *ib_dev = ah->ibah.device;
0450 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
0451
0452 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
0453 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
0454 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
0455 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
0456
0457 if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
0458 return -EINVAL;
0459
0460 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
0461
0462 ud_sq_wqe->sgid_index = ah->av.gid_index;
0463
0464 memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
0465 memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
0466
0467 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
0468 return 0;
0469
0470 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
0471 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
0472
0473 return 0;
0474 }
0475
0476 static inline int set_ud_wqe(struct hns_roce_qp *qp,
0477 const struct ib_send_wr *wr,
0478 void *wqe, unsigned int *sge_idx,
0479 unsigned int owner_bit)
0480 {
0481 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
0482 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
0483 unsigned int curr_idx = *sge_idx;
0484 unsigned int valid_num_sge;
0485 u32 msg_len = 0;
0486 int ret;
0487
0488 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
0489
0490 ret = set_ud_opcode(ud_sq_wqe, wr);
0491 if (WARN_ON(ret))
0492 return ret;
0493
0494 ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
0495
0496 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
0497 !!(wr->send_flags & IB_SEND_SIGNALED));
0498 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
0499 !!(wr->send_flags & IB_SEND_SOLICITED));
0500
0501 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
0502 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
0503 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
0504 curr_idx & (qp->sge.sge_cnt - 1));
0505
0506 ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
0507 qp->qkey : ud_wr(wr)->remote_qkey);
0508 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
0509
0510 ret = fill_ud_av(ud_sq_wqe, ah);
0511 if (ret)
0512 return ret;
0513
0514 qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
0515
0516 set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
0517
	/*
	 * The pipeline can post all valid WQEs into the WQ buffer, including
	 * new WQEs still waiting for a doorbell to update the PI, so the
	 * owner bit must only be written after all other fields and the
	 * extended SGEs have been written out to memory.
	 */
0524 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
0525 dma_wmb();
0526
0527 *sge_idx = curr_idx;
0528 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
0529
0530 return 0;
0531 }
0532
0533 static int set_rc_opcode(struct hns_roce_dev *hr_dev,
0534 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
0535 const struct ib_send_wr *wr)
0536 {
0537 u32 ib_op = wr->opcode;
0538 int ret = 0;
0539
0540 rc_sq_wqe->immtdata = get_immtdata(wr);
0541
0542 switch (ib_op) {
0543 case IB_WR_RDMA_READ:
0544 case IB_WR_RDMA_WRITE:
0545 case IB_WR_RDMA_WRITE_WITH_IMM:
0546 rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
0547 rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
0548 break;
0549 case IB_WR_SEND:
0550 case IB_WR_SEND_WITH_IMM:
0551 break;
0552 case IB_WR_ATOMIC_CMP_AND_SWP:
0553 case IB_WR_ATOMIC_FETCH_AND_ADD:
0554 rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
0555 rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
0556 break;
0557 case IB_WR_REG_MR:
0558 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
0559 set_frmr_seg(rc_sq_wqe, reg_wr(wr));
0560 else
0561 ret = -EOPNOTSUPP;
0562 break;
0563 case IB_WR_LOCAL_INV:
0564 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO);
0565 fallthrough;
0566 case IB_WR_SEND_WITH_INV:
0567 rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
0568 break;
0569 default:
0570 ret = -EINVAL;
0571 }
0572
0573 if (unlikely(ret))
0574 return ret;
0575
0576 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
0577
0578 return ret;
0579 }
0580
0581 static inline int set_rc_wqe(struct hns_roce_qp *qp,
0582 const struct ib_send_wr *wr,
0583 void *wqe, unsigned int *sge_idx,
0584 unsigned int owner_bit)
0585 {
0586 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
0587 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
0588 unsigned int curr_idx = *sge_idx;
0589 unsigned int valid_num_sge;
0590 u32 msg_len = 0;
0591 int ret;
0592
0593 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
0594
0595 rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
0596
0597 ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
0598 if (WARN_ON(ret))
0599 return ret;
0600
0601 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE,
0602 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
0603
0604 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
0605 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
0606
0607 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
0608 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
0609
0610 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
0611 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
0612 set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
0613 else if (wr->opcode != IB_WR_REG_MR)
0614 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
0615 &curr_idx, valid_num_sge);
0616
	/*
	 * The pipeline can post all valid WQEs into the WQ buffer, including
	 * new WQEs still waiting for a doorbell to update the PI, so the
	 * owner bit must only be written after all other fields and the
	 * extended SGEs have been written out to memory.
	 */
0623 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
0624 dma_wmb();
0625
0626 *sge_idx = curr_idx;
0627 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
0628
0629 return ret;
0630 }
0631
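/* Ring the SQ doorbell, or trigger the CQE flushing process if the QP has
 * entered the error state.
 */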
0632 static inline void update_sq_db(struct hns_roce_dev *hr_dev,
0633 struct hns_roce_qp *qp)
0634 {
0635 if (unlikely(qp->state == IB_QPS_ERR)) {
0636 flush_cqe(hr_dev, qp);
0637 } else {
0638 struct hns_roce_v2_db sq_db = {};
0639
0640 hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
0641 hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
0642 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
0643 hr_reg_write(&sq_db, DB_SL, qp->sl);
0644
0645 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
0646 }
0647 }
0648
0649 static inline void update_rq_db(struct hns_roce_dev *hr_dev,
0650 struct hns_roce_qp *qp)
0651 {
0652 if (unlikely(qp->state == IB_QPS_ERR)) {
0653 flush_cqe(hr_dev, qp);
0654 } else {
0655 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
0656 *qp->rdb.db_record =
0657 qp->rq.head & V2_DB_PRODUCER_IDX_M;
0658 } else {
0659 struct hns_roce_v2_db rq_db = {};
0660
0661 hr_reg_write(&rq_db, DB_TAG, qp->qpn);
0662 hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
0663 hr_reg_write(&rq_db, DB_PI, qp->rq.head);
0664
0665 hns_roce_write64(hr_dev, (__le32 *)&rq_db,
0666 qp->rq.db_reg);
0667 }
0668 }
0669 }
0670
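/* Post a 64-byte WQE directly through the doorbell space as eight 64-bit
 * writes; skipped when doorbells are disabled or the hardware is resetting.
 */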
0671 static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
0672 u64 __iomem *dest)
0673 {
0674 #define HNS_ROCE_WRITE_TIMES 8
	struct hns_roce_v2_priv *priv = hr_dev->priv;
0676 struct hnae3_handle *handle = priv->handle;
0677 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
0678 int i;
0679
0680 if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
0681 for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
0682 writeq_relaxed(*(val + i), dest + i);
0683 }
0684
0685 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
0686 void *wqe)
0687 {
0688 #define HNS_ROCE_SL_SHIFT 2
0689 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
0690
	/* All kinds of DirectWQE share the same header field layout. */
0692 hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
0693 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
0694 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
0695 qp->sl >> HNS_ROCE_SL_SHIFT);
0696 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
0697
0698 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
0699 }
0700
0701 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
0702 const struct ib_send_wr *wr,
0703 const struct ib_send_wr **bad_wr)
0704 {
0705 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
0706 struct ib_device *ibdev = &hr_dev->ib_dev;
0707 struct hns_roce_qp *qp = to_hr_qp(ibqp);
0708 unsigned long flags = 0;
0709 unsigned int owner_bit;
0710 unsigned int sge_idx;
0711 unsigned int wqe_idx;
0712 void *wqe = NULL;
0713 u32 nreq;
0714 int ret;
0715
0716 spin_lock_irqsave(&qp->sq.lock, flags);
0717
0718 ret = check_send_valid(hr_dev, qp);
0719 if (unlikely(ret)) {
0720 *bad_wr = wr;
0721 nreq = 0;
0722 goto out;
0723 }
0724
0725 sge_idx = qp->next_sge;
0726
0727 for (nreq = 0; wr; ++nreq, wr = wr->next) {
0728 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
0729 ret = -ENOMEM;
0730 *bad_wr = wr;
0731 goto out;
0732 }
0733
0734 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
0735
0736 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
0737 ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
0738 wr->num_sge, qp->sq.max_gs);
0739 ret = -EINVAL;
0740 *bad_wr = wr;
0741 goto out;
0742 }
0743
0744 wqe = hns_roce_get_send_wqe(qp, wqe_idx);
0745 qp->sq.wrid[wqe_idx] = wr->wr_id;
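		/* The owner bit flips each time the SQ wraps around, so the
		 * hardware can tell freshly posted WQEs from stale ones.
		 */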
0746 owner_bit =
0747 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Build the WQE according to the QP type. */
0750 if (ibqp->qp_type == IB_QPT_RC)
0751 ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
0752 else
0753 ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
0754
0755 if (unlikely(ret)) {
0756 *bad_wr = wr;
0757 goto out;
0758 }
0759 }
0760
0761 out:
0762 if (likely(nreq)) {
0763 qp->sq.head += nreq;
0764 qp->next_sge = sge_idx;
0765
0766 if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
0767 write_dwqe(hr_dev, qp, wqe);
0768 else
0769 update_sq_db(hr_dev, qp);
0770 }
0771
0772 spin_unlock_irqrestore(&qp->sq.lock, flags);
0773
0774 return ret;
0775 }
0776
0777 static int check_recv_valid(struct hns_roce_dev *hr_dev,
0778 struct hns_roce_qp *hr_qp)
0779 {
0780 struct ib_device *ibdev = &hr_dev->ib_dev;
0781 struct ib_qp *ibqp = &hr_qp->ibqp;
0782
0783 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
0784 ibqp->qp_type != IB_QPT_GSI &&
0785 ibqp->qp_type != IB_QPT_UD)) {
0786 ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
0787 ibqp->qp_type);
0788 return -EOPNOTSUPP;
0789 }
0790
0791 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
0792 return -EIO;
0793
0794 if (hr_qp->state == IB_QPS_RESET)
0795 return -EINVAL;
0796
0797 return 0;
0798 }
0799
0800 static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
0801 u32 max_sge, bool rsv)
0802 {
0803 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
0804 u32 i, cnt;
0805
0806 for (i = 0, cnt = 0; i < wr->num_sge; i++) {
		/* Skip zero-length SGEs. */
0808 if (!wr->sg_list[i].length)
0809 continue;
0810 set_data_seg_v2(dseg + cnt, wr->sg_list + i);
0811 cnt++;
0812 }
0813
	/* A reserved SGE tells the hardware to stop reading further segments. */
0815 if (rsv) {
0816 dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
0817 dseg[cnt].addr = 0;
0818 dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
0819 } else {
		/* Clear the remaining segments so the hardware ignores them. */
0821 if (cnt < max_sge)
0822 memset(dseg + cnt, 0,
0823 (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
0824 }
0825 }
0826
0827 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
0828 u32 wqe_idx, u32 max_sge)
0829 {
0830 struct hns_roce_rinl_sge *sge_list;
0831 void *wqe = NULL;
0832 u32 i;
0833
0834 wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
0835 fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
0836
	/* The RQ supports inline data: record the receive SGEs for a later copy. */
0838 if (hr_qp->rq_inl_buf.wqe_cnt) {
0839 sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
0840 hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
0841 for (i = 0; i < wr->num_sge; i++) {
0842 sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
0843 sge_list[i].len = wr->sg_list[i].length;
0844 }
0845 }
0846 }
0847
0848 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
0849 const struct ib_recv_wr *wr,
0850 const struct ib_recv_wr **bad_wr)
0851 {
0852 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
0853 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
0854 struct ib_device *ibdev = &hr_dev->ib_dev;
0855 u32 wqe_idx, nreq, max_sge;
0856 unsigned long flags;
0857 int ret;
0858
0859 spin_lock_irqsave(&hr_qp->rq.lock, flags);
0860
0861 ret = check_recv_valid(hr_dev, hr_qp);
0862 if (unlikely(ret)) {
0863 *bad_wr = wr;
0864 nreq = 0;
0865 goto out;
0866 }
0867
0868 max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
0869 for (nreq = 0; wr; ++nreq, wr = wr->next) {
0870 if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
0871 hr_qp->ibqp.recv_cq))) {
0872 ret = -ENOMEM;
0873 *bad_wr = wr;
0874 goto out;
0875 }
0876
0877 if (unlikely(wr->num_sge > max_sge)) {
0878 ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
0879 wr->num_sge, max_sge);
0880 ret = -EINVAL;
0881 *bad_wr = wr;
0882 goto out;
0883 }
0884
0885 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
0886 fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
0887 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
0888 }
0889
0890 out:
0891 if (likely(nreq)) {
0892 hr_qp->rq.head += nreq;
0893
0894 update_rq_db(hr_dev, hr_qp);
0895 }
0896 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
0897
0898 return ret;
0899 }
0900
0901 static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
0902 {
0903 return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
0904 }
0905
0906 static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
0907 {
0908 return hns_roce_buf_offset(idx_que->mtr.kmem,
0909 n << idx_que->entry_shift);
0910 }
0911
0912 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
0913 {
	/* Always called with interrupts disabled. */
0915 spin_lock(&srq->lock);
0916
0917 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
0918 srq->idx_que.tail++;
0919
0920 spin_unlock(&srq->lock);
0921 }
0922
0923 static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
0924 {
0925 struct hns_roce_idx_que *idx_que = &srq->idx_que;
0926
0927 return idx_que->head - idx_que->tail >= srq->wqe_cnt;
0928 }
0929
0930 static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
0931 const struct ib_recv_wr *wr)
0932 {
0933 struct ib_device *ib_dev = srq->ibsrq.device;
0934
0935 if (unlikely(wr->num_sge > max_sge)) {
0936 ibdev_err(ib_dev,
0937 "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
0938 wr->num_sge, max_sge);
0939 return -EINVAL;
0940 }
0941
0942 if (unlikely(hns_roce_srqwq_overflow(srq))) {
		ibdev_err(ib_dev,
			  "failed to post WQE, the SRQ work queue is full.\n");
0945 return -ENOMEM;
0946 }
0947
0948 return 0;
0949 }
0950
0951 static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
0952 {
0953 struct hns_roce_idx_que *idx_que = &srq->idx_que;
0954 u32 pos;
0955
0956 pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
0957 if (unlikely(pos == srq->wqe_cnt))
0958 return -ENOSPC;
0959
0960 bitmap_set(idx_que->bitmap, pos, 1);
0961 *wqe_idx = pos;
0962 return 0;
0963 }
0964
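/* Publish a WQE index in the SRQ index queue so the hardware knows which
 * SRQ WQE to consume next.
 */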
0965 static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
0966 {
0967 struct hns_roce_idx_que *idx_que = &srq->idx_que;
0968 unsigned int head;
0969 __le32 *buf;
0970
0971 head = idx_que->head & (srq->wqe_cnt - 1);
0972
0973 buf = get_idx_buf(idx_que, head);
0974 *buf = cpu_to_le32(wqe_idx);
0975
0976 idx_que->head++;
0977 }
0978
0979 static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
0980 {
0981 hr_reg_write(db, DB_TAG, srq->srqn);
0982 hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
0983 hr_reg_write(db, DB_PI, srq->idx_que.head);
0984 }
0985
0986 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
0987 const struct ib_recv_wr *wr,
0988 const struct ib_recv_wr **bad_wr)
0989 {
0990 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
0991 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
0992 struct hns_roce_v2_db srq_db;
0993 unsigned long flags;
0994 int ret = 0;
0995 u32 max_sge;
0996 u32 wqe_idx;
0997 void *wqe;
0998 u32 nreq;
0999
1000 spin_lock_irqsave(&srq->lock, flags);
1001
1002 max_sge = srq->max_gs - srq->rsv_sge;
1003 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1004 ret = check_post_srq_valid(srq, max_sge, wr);
1005 if (ret) {
1006 *bad_wr = wr;
1007 break;
1008 }
1009
1010 ret = get_srq_wqe_idx(srq, &wqe_idx);
1011 if (unlikely(ret)) {
1012 *bad_wr = wr;
1013 break;
1014 }
1015
1016 wqe = get_srq_wqe_buf(srq, wqe_idx);
1017 fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
1018 fill_wqe_idx(srq, wqe_idx);
1019 srq->wrid[wqe_idx] = wr->wr_id;
1020 }
1021
1022 if (likely(nreq)) {
1023 update_srq_db(&srq_db, srq);
1024
1025 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
1026 }
1027
1028 spin_unlock_irqrestore(&srq->lock, flags);
1029
1030 return ret;
1031 }
1032
1033 static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
1034 unsigned long instance_stage,
1035 unsigned long reset_stage)
1036 {
	/* A hardware reset has completed at least once, so stop sending
	 * mailbox, CMQ and doorbell operations to the hardware. If this is
	 * detected while the instance or the reset handling is still in its
	 * INIT stage, return EBUSY so the init path can roll back and let
	 * the NIC driver reschedule the reset; otherwise report success so
	 * that the caller can finish without touching the hardware.
	 */
1046 hr_dev->is_reset = true;
1047 hr_dev->dis_db = true;
1048
1049 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
1050 instance_stage == HNS_ROCE_STATE_INIT)
1051 return CMD_RST_PRC_EBUSY;
1052
1053 return CMD_RST_PRC_SUCCESS;
1054 }
1055
1056 static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
1057 unsigned long instance_stage,
1058 unsigned long reset_stage)
1059 {
1060 #define HW_RESET_TIMEOUT_US 1000000
1061 #define HW_RESET_SLEEP_US 1000
1062
1063 struct hns_roce_v2_priv *priv = hr_dev->priv;
1064 struct hnae3_handle *handle = priv->handle;
1065 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1066 unsigned long val;
1067 int ret;
1068
	/* A hardware reset is in progress, so stop sending mailbox, CMQ and
	 * doorbell operations and wait for the reset counter to advance. If
	 * the reset has not completed, or the instance or reset handling is
	 * still in its INIT stage, return EBUSY so the caller can retry or
	 * roll back.
	 */
1078 hr_dev->dis_db = true;
1079
1080 ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
1081 val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
1082 HW_RESET_TIMEOUT_US, false, handle);
1083 if (!ret)
1084 hr_dev->is_reset = true;
1085
1086 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
1087 instance_stage == HNS_ROCE_STATE_INIT)
1088 return CMD_RST_PRC_EBUSY;
1089
1090 return CMD_RST_PRC_SUCCESS;
1091 }
1092
1093 static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
1094 {
1095 struct hns_roce_v2_priv *priv = hr_dev->priv;
1096 struct hnae3_handle *handle = priv->handle;
1097 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1098
	/* A NIC software reset was detected while the instance is being
	 * initialized: stop sending commands, because their results cannot
	 * be waited for while the hardware is resetting.
	 */
1103 hr_dev->dis_db = true;
1104 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
1105 hr_dev->is_reset = true;
1106
1107 return CMD_RST_PRC_EBUSY;
1108 }
1109
1110 static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
1111 struct hnae3_handle *handle)
1112 {
1113 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1114 unsigned long instance_stage;
1115 unsigned long reset_stage;
1116 unsigned long reset_cnt;
1117 bool sw_resetting;
1118 bool hw_resetting;
1119
	/* Reset state is taken from the NIC driver and the RoCE driver:
	 * reset_cnt    -- number of completed hardware resets.
	 * hw_resetting -- whether the hardware is resetting right now.
	 * sw_resetting -- whether the NIC software reset is running now.
	 */
1127 instance_stage = handle->rinfo.instance_state;
1128 reset_stage = handle->rinfo.reset_state;
1129 reset_cnt = ops->ae_dev_reset_cnt(handle);
1130 if (reset_cnt != hr_dev->reset_cnt)
1131 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
1132 reset_stage);
1133
1134 hw_resetting = ops->get_cmdq_stat(handle);
1135 if (hw_resetting)
1136 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
1137 reset_stage);
1138
1139 sw_resetting = ops->ae_dev_resetting(handle);
1140 if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
1141 return hns_roce_v2_cmd_sw_resetting(hr_dev);
1142
1143 return CMD_RST_PRC_OTHERS;
1144 }
1145
1146 static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
1147 {
1148 struct hns_roce_v2_priv *priv = hr_dev->priv;
1149 struct hnae3_handle *handle = priv->handle;
1150 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1151
1152 if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
1153 return true;
1154
1155 if (ops->get_hw_reset_stat(handle))
1156 return true;
1157
1158 if (ops->ae_dev_resetting(handle))
1159 return true;
1160
1161 return false;
1162 }
1163
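/* Returns true if a mailbox/CMQ command can be issued now. When it returns
 * false, *busy says whether the caller should report -EBUSY (reset still in
 * progress) or silently skip the command (device already reset).
 */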
1164 static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
1165 {
1166 struct hns_roce_v2_priv *priv = hr_dev->priv;
1167 u32 status;
1168
1169 if (hr_dev->is_reset)
1170 status = CMD_RST_PRC_SUCCESS;
1171 else
1172 status = check_aedev_reset_status(hr_dev, priv->handle);
1173
1174 *busy = (status == CMD_RST_PRC_EBUSY);
1175
1176 return status == CMD_RST_PRC_OTHERS;
1177 }
1178
1179 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
1180 struct hns_roce_v2_cmq_ring *ring)
1181 {
1182 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
1183
1184 ring->desc = dma_alloc_coherent(hr_dev->dev, size,
1185 &ring->desc_dma_addr, GFP_KERNEL);
1186 if (!ring->desc)
1187 return -ENOMEM;
1188
1189 return 0;
1190 }
1191
1192 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
1193 struct hns_roce_v2_cmq_ring *ring)
1194 {
1195 dma_free_coherent(hr_dev->dev,
1196 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
1197 ring->desc, ring->desc_dma_addr);
1198
1199 ring->desc_dma_addr = 0;
1200 }
1201
1202 static int init_csq(struct hns_roce_dev *hr_dev,
1203 struct hns_roce_v2_cmq_ring *csq)
1204 {
1205 dma_addr_t dma;
1206 int ret;
1207
1208 csq->desc_num = CMD_CSQ_DESC_NUM;
1209 spin_lock_init(&csq->lock);
1210 csq->flag = TYPE_CSQ;
1211 csq->head = 0;
1212
1213 ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
1214 if (ret)
1215 return ret;
1216
1217 dma = csq->desc_dma_addr;
1218 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
1219 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
1220 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
1221 (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
1222
	/* Make sure the CI register is cleared before the PI register. */
1224 roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
1225 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
1226
1227 return 0;
1228 }
1229
1230 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
1231 {
1232 struct hns_roce_v2_priv *priv = hr_dev->priv;
1233 int ret;
1234
1235 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
1236
1237 ret = init_csq(hr_dev, &priv->cmq.csq);
1238 if (ret)
1239 dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
1240
1241 return ret;
1242 }
1243
1244 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
1245 {
1246 struct hns_roce_v2_priv *priv = hr_dev->priv;
1247
1248 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
1249 }
1250
1251 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
1252 enum hns_roce_opcode_type opcode,
1253 bool is_read)
1254 {
1255 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
1256 desc->opcode = cpu_to_le16(opcode);
1257 desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1258 if (is_read)
1259 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
1260 else
1261 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1262 }
1263
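/* The CSQ is drained once the hardware consumer index has caught up with the
 * software producer index (csq->head).
 */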
1264 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
1265 {
1266 u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1267 struct hns_roce_v2_priv *priv = hr_dev->priv;
1268
1269 return tail == priv->cmq.csq.head;
1270 }
1271
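/* If a command times out while the reset handling or the instance is still in
 * its INIT stage, mark the command queue as fatally broken so that later
 * commands fail fast with -EIO instead of timing out again.
 */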
1272 static void update_cmdq_status(struct hns_roce_dev *hr_dev)
1273 {
1274 struct hns_roce_v2_priv *priv = hr_dev->priv;
1275 struct hnae3_handle *handle = priv->handle;
1276
1277 if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
1278 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
1279 hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
1280 }
1281
1282 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1283 struct hns_roce_cmq_desc *desc, int num)
1284 {
1285 struct hns_roce_v2_priv *priv = hr_dev->priv;
1286 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1287 u32 timeout = 0;
1288 u16 desc_ret;
1289 u32 tail;
1290 int ret;
1291 int i;
1292
1293 spin_lock_bh(&csq->lock);
1294
1295 tail = csq->head;
1296
1297 for (i = 0; i < num; i++) {
1298 csq->desc[csq->head++] = desc[i];
1299 if (csq->head == csq->desc_num)
1300 csq->head = 0;
1301 }
1302
	/* Update the producer index to hand the descriptors to the hardware. */
1304 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
1305
1306 do {
1307 if (hns_roce_cmq_csq_done(hr_dev))
1308 break;
1309 udelay(1);
1310 } while (++timeout < priv->cmq.tx_timeout);
1311
1312 if (hns_roce_cmq_csq_done(hr_dev)) {
1313 ret = 0;
1314 for (i = 0; i < num; i++) {
			/* Check the result the hardware wrote back. */
1316 desc[i] = csq->desc[tail++];
1317 if (tail == csq->desc_num)
1318 tail = 0;
1319
1320 desc_ret = le16_to_cpu(desc[i].retval);
1321 if (likely(desc_ret == CMD_EXEC_SUCCESS))
1322 continue;
1323
			dev_err_ratelimited(hr_dev->dev,
					    "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
					    le16_to_cpu(desc->opcode), desc_ret);
1327 ret = -EIO;
1328 }
1329 } else {
		/* FW/HW reset or wrong descriptor count: resync with the HW CI. */
1331 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1332 dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
1333 csq->head, tail);
1334 csq->head = tail;
1335
1336 update_cmdq_status(hr_dev);
1337
1338 ret = -EAGAIN;
1339 }
1340
1341 spin_unlock_bh(&csq->lock);
1342
1343 return ret;
1344 }
1345
1346 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1347 struct hns_roce_cmq_desc *desc, int num)
1348 {
1349 bool busy;
1350 int ret;
1351
1352 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1353 return -EIO;
1354
1355 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1356 return busy ? -EBUSY : 0;
1357
1358 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1359 if (ret) {
1360 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1361 return busy ? -EBUSY : 0;
1362 }
1363
1364 return ret;
1365 }
1366
1367 static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
1368 dma_addr_t base_addr, u8 cmd, unsigned long tag)
1369 {
1370 struct hns_roce_cmd_mailbox *mbox;
1371 int ret;
1372
1373 mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
1374 if (IS_ERR(mbox))
1375 return PTR_ERR(mbox);
1376
1377 ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
1378 hns_roce_free_cmd_mailbox(hr_dev, mbox);
1379 return ret;
1380 }
1381
1382 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1383 {
1384 struct hns_roce_query_version *resp;
1385 struct hns_roce_cmq_desc desc;
1386 int ret;
1387
1388 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1389 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1390 if (ret)
1391 return ret;
1392
1393 resp = (struct hns_roce_query_version *)desc.data;
1394 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1395 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1396
1397 return 0;
1398 }
1399
1400 static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
1401 struct hnae3_handle *handle)
1402 {
1403 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1404 unsigned long end;
1405
1406 hr_dev->dis_db = true;
1407
1408 dev_warn(hr_dev->dev,
1409 "Func clear is pending, device in resetting state.\n");
1410 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1411 while (end) {
1412 if (!ops->get_hw_reset_stat(handle)) {
1413 hr_dev->is_reset = true;
1414 dev_info(hr_dev->dev,
1415 "Func clear success after reset.\n");
1416 return;
1417 }
1418 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1419 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1420 }
1421
1422 dev_warn(hr_dev->dev, "Func clear failed.\n");
1423 }
1424
1425 static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
1426 struct hnae3_handle *handle)
1427 {
1428 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1429 unsigned long end;
1430
1431 hr_dev->dis_db = true;
1432
1433 dev_warn(hr_dev->dev,
1434 "Func clear is pending, device in resetting state.\n");
1435 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1436 while (end) {
1437 if (ops->ae_dev_reset_cnt(handle) !=
1438 hr_dev->reset_cnt) {
1439 hr_dev->is_reset = true;
1440 dev_info(hr_dev->dev,
1441 "Func clear success after sw reset\n");
1442 return;
1443 }
1444 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1445 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1446 }
1447
1448 dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1449 }
1450
1451 static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
1452 int flag)
1453 {
1454 struct hns_roce_v2_priv *priv = hr_dev->priv;
1455 struct hnae3_handle *handle = priv->handle;
1456 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1457
1458 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
1459 hr_dev->dis_db = true;
1460 hr_dev->is_reset = true;
1461 dev_info(hr_dev->dev, "Func clear success after reset.\n");
1462 return;
1463 }
1464
1465 if (ops->get_hw_reset_stat(handle)) {
1466 func_clr_hw_resetting_state(hr_dev, handle);
1467 return;
1468 }
1469
1470 if (ops->ae_dev_resetting(handle) &&
1471 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
1472 func_clr_sw_resetting_state(hr_dev, handle);
1473 return;
1474 }
1475
1476 if (retval && !flag)
1477 dev_warn(hr_dev->dev,
1478 "Func clear read failed, ret = %d.\n", retval);
1479
1480 dev_warn(hr_dev->dev, "Func clear failed.\n");
1481 }
1482
1483 static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
1484 {
1485 bool fclr_write_fail_flag = false;
1486 struct hns_roce_func_clear *resp;
1487 struct hns_roce_cmq_desc desc;
1488 unsigned long end;
1489 int ret = 0;
1490
1491 if (check_device_is_in_reset(hr_dev))
1492 goto out;
1493
1494 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1495 resp = (struct hns_roce_func_clear *)desc.data;
1496 resp->rst_funcid_en = cpu_to_le32(vf_id);
1497
1498 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1499 if (ret) {
1500 fclr_write_fail_flag = true;
1501 dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1502 ret);
1503 goto out;
1504 }
1505
1506 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1507 end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1508 while (end) {
1509 if (check_device_is_in_reset(hr_dev))
1510 goto out;
1511 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1512 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1513
1514 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1515 true);
1516
1517 resp->rst_funcid_en = cpu_to_le32(vf_id);
1518 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1519 if (ret)
1520 continue;
1521
1522 if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
1523 if (vf_id == 0)
1524 hr_dev->is_reset = true;
1525 return;
1526 }
1527 }
1528
1529 out:
1530 hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
1531 }
1532
1533 static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
1534 {
1535 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1536 struct hns_roce_cmq_desc desc[2];
1537 struct hns_roce_cmq_req *req_a;
1538
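	/* Reuse the ALLOC_VF_RES command with all resource counts left at
	 * zero, which releases the resources previously granted to this VF.
	 */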
1539 req_a = (struct hns_roce_cmq_req *)desc[0].data;
1540 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1541 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1542 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1543 hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
1544
1545 return hns_roce_cmq_send(hr_dev, desc, 2);
1546 }
1547
1548 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1549 {
1550 int ret;
1551 int i;
1552
1553 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
1554 return;
1555
1556 for (i = hr_dev->func_num - 1; i >= 0; i--) {
1557 __hns_roce_function_clear(hr_dev, i);
1558
1559 if (i == 0)
1560 continue;
1561
1562 ret = hns_roce_free_vf_resource(hr_dev, i);
1563 if (ret)
1564 ibdev_err(&hr_dev->ib_dev,
1565 "failed to free vf resource, vf_id = %d, ret = %d.\n",
1566 i, ret);
1567 }
1568 }
1569
1570 static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
1571 {
1572 struct hns_roce_cmq_desc desc;
1573 int ret;
1574
1575 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
1576 false);
1577 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1578 if (ret)
1579 ibdev_err(&hr_dev->ib_dev,
1580 "failed to clear extended doorbell info, ret = %d.\n",
1581 ret);
1582
1583 return ret;
1584 }
1585
1586 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1587 {
1588 struct hns_roce_query_fw_info *resp;
1589 struct hns_roce_cmq_desc desc;
1590 int ret;
1591
1592 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1593 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1594 if (ret)
1595 return ret;
1596
1597 resp = (struct hns_roce_query_fw_info *)desc.data;
1598 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1599
1600 return 0;
1601 }
1602
1603 static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
1604 {
1605 struct hns_roce_cmq_desc desc;
1606 int ret;
1607
1608 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
1609 hr_dev->func_num = 1;
1610 return 0;
1611 }
1612
1613 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
1614 true);
1615 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1616 if (ret) {
1617 hr_dev->func_num = 1;
1618 return ret;
1619 }
1620
1621 hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
1622 hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
1623
1624 return 0;
1625 }
1626
1627 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1628 {
1629 struct hns_roce_cmq_desc desc;
1630 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1631 u32 clock_cycles_of_1us;
1632
1633 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1634 false);
1635
1636 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
1637 clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
1638 else
1639 clock_cycles_of_1us = HNS_ROCE_1US_CFG;
1640
1641 hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
1642 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
1643
1644 return hns_roce_cmq_send(hr_dev, &desc, 1);
1645 }
1646
1647 static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1648 {
1649 struct hns_roce_cmq_desc desc[2];
1650 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1651 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1652 struct hns_roce_caps *caps = &hr_dev->caps;
1653 enum hns_roce_opcode_type opcode;
1654 u32 func_num;
1655 int ret;
1656
1657 if (is_vf) {
1658 opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1659 func_num = 1;
1660 } else {
1661 opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1662 func_num = hr_dev->func_num;
1663 }
1664
1665 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
1666 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1667 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
1668
1669 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1670 if (ret)
1671 return ret;
1672
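	/* The queried counts cover all functions, so divide by func_num to
	 * get this function's share.
	 */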
1673 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1674 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1675 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1676 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1677 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1678 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1679 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1680 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1681
1682 if (is_vf) {
1683 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1684 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1685 func_num;
1686 } else {
1687 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1688 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1689 func_num;
1690 }
1691
1692 return 0;
1693 }
1694
1695 static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1696 {
1697 struct hns_roce_cmq_desc desc;
1698 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1699 struct hns_roce_caps *caps = &hr_dev->caps;
1700 u32 func_num, qp_num;
1701 int ret;
1702
1703 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
1704 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1705 if (ret)
1706 return ret;
1707
1708 func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
1709 qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
1710 caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1711
1712 qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
1713 caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1714
1715 return 0;
1716 }
1717
1718 static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1719 {
1720 struct hns_roce_cmq_desc desc;
1721 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1722 struct hns_roce_caps *caps = &hr_dev->caps;
1723 int ret;
1724
1725 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1726 true);
1727
1728 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1729 if (ret)
1730 return ret;
1731
1732 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1733 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1734
1735 return 0;
1736 }
1737
1738 static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1739 {
1740 struct device *dev = hr_dev->dev;
1741 int ret;
1742
1743 ret = load_func_res_caps(hr_dev, is_vf);
1744 if (ret) {
1745 dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
1746 is_vf ? "vf" : "pf");
1747 return ret;
1748 }
1749
1750 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1751 ret = load_ext_cfg_caps(hr_dev, is_vf);
1752 if (ret)
1753 dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
1754 ret, is_vf ? "vf" : "pf");
1755 }
1756
1757 return ret;
1758 }
1759
1760 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1761 {
1762 struct device *dev = hr_dev->dev;
1763 int ret;
1764
1765 ret = query_func_resource_caps(hr_dev, false);
1766 if (ret)
1767 return ret;
1768
1769 ret = load_pf_timer_res_caps(hr_dev);
1770 if (ret)
1771 dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1772 ret);
1773
1774 return ret;
1775 }
1776
1777 static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1778 {
1779 return query_func_resource_caps(hr_dev, true);
1780 }
1781
1782 static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1783 u32 vf_id)
1784 {
1785 struct hns_roce_vf_switch *swt;
1786 struct hns_roce_cmq_desc desc;
1787 int ret;
1788
1789 swt = (struct hns_roce_vf_switch *)desc.data;
1790 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1791 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1792 hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
1793 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1794 if (ret)
1795 return ret;
1796
1797 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1798 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1799 hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
1800 hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
1801 hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
1802
1803 return hns_roce_cmq_send(hr_dev, &desc, 1);
1804 }
1805
1806 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1807 {
1808 u32 vf_id;
1809 int ret;
1810
1811 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1812 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1813 if (ret)
1814 return ret;
1815 }
1816 return 0;
1817 }
1818
1819 static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1820 {
1821 struct hns_roce_cmq_desc desc[2];
1822 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1823 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1824 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1825 struct hns_roce_caps *caps = &hr_dev->caps;
1826
1827 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1828 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1829 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1830
1831 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
1832
1833 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1834 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1835 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1836 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1837 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1838 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1839 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1840 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
1841 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
1842 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
1843 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
1844 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
1845 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
1846 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
1847
1848 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1849 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
1850 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
1851 vf_id * caps->gmv_bt_num);
1852 } else {
1853 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
1854 hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
1855 vf_id * caps->sgid_bt_num);
1856 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
1857 hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
1858 vf_id * caps->smac_bt_num);
1859 }
1860
1861 return hns_roce_cmq_send(hr_dev, desc, 2);
1862 }
1863
1864 static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
1865 {
1866 struct hns_roce_cmq_desc desc;
1867 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1868 struct hns_roce_caps *caps = &hr_dev->caps;
1869
1870 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
1871
1872 hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
1873
1874 hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
1875 hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
1876 hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
1877 hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
1878
1879 return hns_roce_cmq_send(hr_dev, &desc, 1);
1880 }
1881
1882 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1883 {
1884 u32 func_num = max_t(u32, 1, hr_dev->func_num);
1885 u32 vf_id;
1886 int ret;
1887
1888 for (vf_id = 0; vf_id < func_num; vf_id++) {
1889 ret = config_vf_hem_resource(hr_dev, vf_id);
1890 if (ret) {
1891 dev_err(hr_dev->dev,
1892 "failed to config vf-%u hem res, ret = %d.\n",
1893 vf_id, ret);
1894 return ret;
1895 }
1896
1897 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1898 ret = config_vf_ext_resource(hr_dev, vf_id);
1899 if (ret) {
1900 dev_err(hr_dev->dev,
1901 "failed to config vf-%u ext res, ret = %d.\n",
1902 vf_id, ret);
1903 return ret;
1904 }
1905 }
1906 }
1907
1908 return 0;
1909 }
1910
1911 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1912 {
1913 struct hns_roce_cmq_desc desc;
1914 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1915 struct hns_roce_caps *caps = &hr_dev->caps;
1916
1917 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1918
1919 hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
1920 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1921 hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
1922 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1923 hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
1924 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
1925
1926 hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
1927 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1928 hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
1929 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1930 hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
1931 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
1932
1933 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1934 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1935 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1936 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1937 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1938 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1939
1940 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1941 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1942 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1943 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1944 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1945 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1946
1947 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1948 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1949 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1950 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1951 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1952 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1953
1954 return hns_roce_cmq_send(hr_dev, &desc, 1);
1955 }
1956
1957
1958 static void set_default_caps(struct hns_roce_dev *hr_dev)
1959 {
1960 struct hns_roce_caps *caps = &hr_dev->caps;
1961
1962 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1963 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1964 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1965 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1966 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1967 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1968 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1969 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1970 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1971
1972 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1973 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1974 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1975 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1976 caps->num_comp_vectors = 0;
1977
1978 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1979 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1980 caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
1981 caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
1982
1983 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1984 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1985 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1986 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1987 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1988 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1989 caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1990 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1991 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1992 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1993 caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
1994 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1995 caps->reserved_lkey = 0;
1996 caps->reserved_pds = 0;
1997 caps->reserved_mrws = 1;
1998 caps->reserved_uars = 0;
1999 caps->reserved_cqs = 0;
2000 caps->reserved_srqs = 0;
2001 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
2002
2003 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
2004 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
2005 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
2006 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
2007 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
2008
2009 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
2010 caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
2011 caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
2012 caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
2013 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
2014 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
2015 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
2016 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
2017
2018 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
2019 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
2020 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
2021 HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
2022
2023 caps->pkey_table_len[0] = 1;
2024 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
2025 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
2026 caps->local_ca_ack_delay = 0;
2027 caps->max_mtu = IB_MTU_4096;
2028
2029 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
2030 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
2031
2032 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
2033 HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
2034 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
2035
2036 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
2037
2038 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2039 caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
2040 HNS_ROCE_CAP_FLAG_DIRECT_WQE;
2041 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
2042 } else {
2043 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
2044
2045
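/* Entry sizes below are the HIP08 defaults; on HIP09 apply_func_caps() sets the V3 sizes instead. */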
2046 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2047 caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
2048 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2049 }
2050 }
2051
2052 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2053 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2054 {
2055 u64 obj_per_chunk;
2056 u64 bt_chunk_size = PAGE_SIZE;
2057 u64 buf_chunk_size = PAGE_SIZE;
2058 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
2059
2060 *buf_page_size = 0;
2061 *bt_page_size = 0;
2062
2063 switch (hop_num) {
2064 case 3:
2065 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2066 (bt_chunk_size / BA_BYTE_LEN) *
2067 (bt_chunk_size / BA_BYTE_LEN) *
2068 obj_per_chunk_default;
2069 break;
2070 case 2:
2071 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2072 (bt_chunk_size / BA_BYTE_LEN) *
2073 obj_per_chunk_default;
2074 break;
2075 case 1:
2076 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2077 obj_per_chunk_default;
2078 break;
2079 case HNS_ROCE_HOP_NUM_0:
2080 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
2081 break;
2082 default:
2083 pr_err("table %u does not support hop_num = %u!\n", hem_type,
2084 hop_num);
2085 return;
2086 }
2087
2088 if (hem_type >= HEM_TYPE_MTT)
2089 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2090 else
2091 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2092 }
2093
2094 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2095 {
2096 struct hns_roce_caps *caps = &hr_dev->caps;
2097
2098
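/* EQ: keep the default page shift for the EQE BA table and buffer. */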
2099 caps->eqe_ba_pg_sz = 0;
2100 caps->eqe_buf_pg_sz = 0;
2101
2102
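/* Link table (LLM) buffer. */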
2103 caps->llm_buf_pg_sz = 0;
2104
2105
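/* MR: MPT BA/buffer and the PBL tables. */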
2106 caps->mpt_ba_pg_sz = 0;
2107 caps->mpt_buf_pg_sz = 0;
2108 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2109 caps->pbl_buf_pg_sz = 0;
2110 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2111 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2112 HEM_TYPE_MTPT);
2113
2114
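/* QP: QPC, QPC timer, SCCC and MTT page shifts. */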
2115 caps->qpc_ba_pg_sz = 0;
2116 caps->qpc_buf_pg_sz = 0;
2117 caps->qpc_timer_ba_pg_sz = 0;
2118 caps->qpc_timer_buf_pg_sz = 0;
2119 caps->sccc_ba_pg_sz = 0;
2120 caps->sccc_buf_pg_sz = 0;
2121 caps->mtt_ba_pg_sz = 0;
2122 caps->mtt_buf_pg_sz = 0;
2123 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2124 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2125 HEM_TYPE_QPC);
2126
2127 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2128 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2129 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2130 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2131
2132
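/* CQ: CQC, CQC timer and CQE page shifts. */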
2133 caps->cqc_ba_pg_sz = 0;
2134 caps->cqc_buf_pg_sz = 0;
2135 caps->cqc_timer_ba_pg_sz = 0;
2136 caps->cqc_timer_buf_pg_sz = 0;
2137 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2138 caps->cqe_buf_pg_sz = 0;
2139 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2140 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2141 HEM_TYPE_CQC);
2142 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2143 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2144
2145
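/* SRQ: SRQC, SRQ WQE and IDX page shifts (only when SRQ is supported). */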
2146 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2147 caps->srqc_ba_pg_sz = 0;
2148 caps->srqc_buf_pg_sz = 0;
2149 caps->srqwqe_ba_pg_sz = 0;
2150 caps->srqwqe_buf_pg_sz = 0;
2151 caps->idx_ba_pg_sz = 0;
2152 caps->idx_buf_pg_sz = 0;
2153 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2154 caps->srqc_hop_num, caps->srqc_bt_num,
2155 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2156 HEM_TYPE_SRQC);
2157 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2158 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2159 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2160 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2161 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2162 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2163 }
2164
2165
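/* GMV: GID/MAC/VLAN table. */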
2166 caps->gmv_ba_pg_sz = 0;
2167 caps->gmv_buf_pg_sz = 0;
2168 }
2169
2170
2171 static void apply_func_caps(struct hns_roce_dev *hr_dev)
2172 {
2173 struct hns_roce_caps *caps = &hr_dev->caps;
2174 struct hns_roce_v2_priv *priv = hr_dev->priv;
2175
2176
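/* The entry sizes, hop numbers and segment counts below are driver constants, not values queried from firmware. */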
2177 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2178 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2179 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2180
2181 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2182 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2183 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2184
2185 caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
2186 caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
2187
2188 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2189 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2190 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2191
2192 if (!caps->num_comp_vectors)
2193 caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
2194 (u32)priv->handle->rinfo.num_vectors - 2);
2195
2196 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2197 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2198 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2199 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2200
2201
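/* HIP09 uses the V3 QPC, CQE and SCCC sizes. */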
2202 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2203 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2204 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2205
2206
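/* GMV (GID/MAC/VLAN) table: the GID table length and entry count are derived from the GMV BT number on HIP09. */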
2207 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2208
2209 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2210 caps->gid_table_len[0] = caps->gmv_bt_num *
2211 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2212
2213 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2214 caps->gmv_entry_sz);
2215 } else {
2216 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2217
2218 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2219 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2220 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2221 caps->gid_table_len[0] /= func_num;
2222 }
2223
2224 if (hr_dev->is_vf) {
2225 caps->default_aeq_arm_st = 0x3;
2226 caps->default_ceq_arm_st = 0x3;
2227 caps->default_ceq_max_cnt = 0x1;
2228 caps->default_ceq_period = 0x10;
2229 caps->default_aeq_max_cnt = 0x1;
2230 caps->default_aeq_period = 0x10;
2231 }
2232
2233 set_hem_page_size(hr_dev);
2234 }
2235
2236 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
2237 {
2238 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2239 struct hns_roce_caps *caps = &hr_dev->caps;
2240 struct hns_roce_query_pf_caps_a *resp_a;
2241 struct hns_roce_query_pf_caps_b *resp_b;
2242 struct hns_roce_query_pf_caps_c *resp_c;
2243 struct hns_roce_query_pf_caps_d *resp_d;
2244 struct hns_roce_query_pf_caps_e *resp_e;
2245 int ctx_hop_num;
2246 int pbl_hop_num;
2247 int ret;
2248 int i;
2249
2250 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2251 hns_roce_cmq_setup_basic_desc(&desc[i],
2252 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
2253 true);
2254 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2255 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2256 else
2257 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2258 }
2259
2260 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2261 if (ret)
2262 return ret;
2263
2264 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2265 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2266 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2267 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2268 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2269
2270 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2271 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2272 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2273 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2274 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2275 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
2276 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2277 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2278 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2279 caps->num_other_vectors = resp_a->num_other_vectors;
2280 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2281 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2282 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
2283 caps->cqe_sz = resp_a->cqe_sz;
2284
2285 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2286 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2287 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2288 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2289 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2290 caps->idx_entry_sz = resp_b->idx_entry_sz;
2291 caps->sccc_sz = resp_b->sccc_sz;
2292 caps->max_mtu = resp_b->max_mtu;
2293 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
2294 caps->min_cqes = resp_b->min_cqes;
2295 caps->min_wqes = resp_b->min_wqes;
2296 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2297 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2298 caps->phy_num_uars = resp_b->phy_num_uars;
2299 ctx_hop_num = resp_b->ctx_hop_num;
2300 pbl_hop_num = resp_b->pbl_hop_num;
2301
2302 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
2303
2304 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
2305 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2306 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2307
2308 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
2309 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
2310 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
2311 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
2312 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
2313 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
2314 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2315 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2316
2317 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
2318 caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
2319 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2320 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
2321 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
2322 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
2323 caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST);
2324 caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST);
2325 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
2326 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
2327 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
2328 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
2329
2330 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
2331 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
2332 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
2333 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
2334 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
2335 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2336 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2337 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2338 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2339
2340 caps->qpc_hop_num = ctx_hop_num;
2341 caps->sccc_hop_num = ctx_hop_num;
2342 caps->srqc_hop_num = ctx_hop_num;
2343 caps->cqc_hop_num = ctx_hop_num;
2344 caps->mpt_hop_num = ctx_hop_num;
2345 caps->mtt_hop_num = pbl_hop_num;
2346 caps->cqe_hop_num = pbl_hop_num;
2347 caps->srqwqe_hop_num = pbl_hop_num;
2348 caps->idx_hop_num = pbl_hop_num;
2349 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM);
2350 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM);
2351 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM);
2352
2353 return 0;
2354 }
2355
2356 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2357 {
2358 struct hns_roce_cmq_desc desc;
2359 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2360
2361 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2362 false);
2363
2364 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2365 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2366
2367 return hns_roce_cmq_send(hr_dev, &desc, 1);
2368 }
2369
2370 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2371 {
2372 struct hns_roce_caps *caps = &hr_dev->caps;
2373 int ret;
2374
2375 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
2376 return 0;
2377
2378 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2379 caps->qpc_sz);
2380 if (ret) {
2381 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2382 return ret;
2383 }
2384
2385 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2386 caps->sccc_sz);
2387 if (ret)
2388 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2389
2390 return ret;
2391 }
2392
2393 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2394 {
2395 struct device *dev = hr_dev->dev;
2396 int ret;
2397
2398 hr_dev->func_num = 1;
2399
2400 set_default_caps(hr_dev);
2401
2402 ret = hns_roce_query_vf_resource(hr_dev);
2403 if (ret) {
2404 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2405 return ret;
2406 }
2407
2408 apply_func_caps(hr_dev);
2409
2410 ret = hns_roce_v2_set_bt(hr_dev);
2411 if (ret)
2412 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2413
2414 return ret;
2415 }
2416
2417 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2418 {
2419 struct device *dev = hr_dev->dev;
2420 int ret;
2421
2422 ret = hns_roce_query_func_info(hr_dev);
2423 if (ret) {
2424 dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2425 return ret;
2426 }
2427
2428 ret = hns_roce_config_global_param(hr_dev);
2429 if (ret) {
2430 dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2431 return ret;
2432 }
2433
2434 ret = hns_roce_set_vf_switch_param(hr_dev);
2435 if (ret) {
2436 dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2437 return ret;
2438 }
2439
2440 ret = hns_roce_query_pf_caps(hr_dev);
2441 if (ret)
2442 set_default_caps(hr_dev);
2443
2444 ret = hns_roce_query_pf_resource(hr_dev);
2445 if (ret) {
2446 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2447 return ret;
2448 }
2449
2450 apply_func_caps(hr_dev);
2451
2452 ret = hns_roce_alloc_vf_resource(hr_dev);
2453 if (ret) {
2454 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2455 return ret;
2456 }
2457
2458 ret = hns_roce_v2_set_bt(hr_dev);
2459 if (ret) {
2460 dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2461 return ret;
2462 }
2463
2464
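/* Program the QPC and SCCC entry sizes (a no-op on HIP08). */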
2465 return hns_roce_config_entry_size(hr_dev);
2466 }
2467
2468 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2469 {
2470 struct device *dev = hr_dev->dev;
2471 int ret;
2472
2473 ret = hns_roce_cmq_query_hw_info(hr_dev);
2474 if (ret) {
2475 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2476 return ret;
2477 }
2478
2479 ret = hns_roce_query_fw_ver(hr_dev);
2480 if (ret) {
2481 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2482 return ret;
2483 }
2484
2485 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2486 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2487
2488 if (hr_dev->is_vf)
2489 return hns_roce_v2_vf_profile(hr_dev);
2490 else
2491 return hns_roce_v2_pf_profile(hr_dev);
2492 }
2493
2494 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2495 {
2496 u32 i, next_ptr, page_num;
2497 __le64 *entry = cfg_buf;
2498 dma_addr_t addr;
2499 u64 val;
2500
2501 page_num = data_buf->npages;
2502 for (i = 0; i < page_num; i++) {
2503 addr = hns_roce_buf_page(data_buf, i);
2504 if (i == (page_num - 1))
2505 next_ptr = 0;
2506 else
2507 next_ptr = i + 1;
2508
2509 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2510 entry[i] = cpu_to_le64(val);
2511 }
2512 }
2513
2514 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2515 struct hns_roce_link_table *table)
2516 {
2517 struct hns_roce_cmq_desc desc[2];
2518 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2519 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2520 struct hns_roce_buf *buf = table->buf;
2521 enum hns_roce_opcode_type opcode;
2522 dma_addr_t addr;
2523
2524 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2525 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2526 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2527 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2528
2529 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2530 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2531 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2532 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2533 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2534
2535 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
2536 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2537 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2538 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2539 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2540
2541 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
2542 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2543 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2544 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2545
2546 return hns_roce_cmq_send(hr_dev, desc, 2);
2547 }
2548
2549 static struct hns_roce_link_table *
2550 alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2551 {
2552 struct hns_roce_v2_priv *priv = hr_dev->priv;
2553 struct hns_roce_link_table *link_tbl;
2554 u32 pg_shift, size, min_size;
2555
2556 link_tbl = &priv->ext_llm;
2557 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2558 size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2559 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
2560
2561
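/* Allocate the link-list data buffer, no smaller than the required minimum. */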
2562 size = max(size, min_size);
2563 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
2564 if (IS_ERR(link_tbl->buf))
2565 return ERR_PTR(-ENOMEM);
2566
2567
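/* Allocate the configuration table: one 64-bit entry per data-buffer page. */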
2568 size = link_tbl->buf->npages * sizeof(u64);
2569 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
2570 &link_tbl->table.map,
2571 GFP_KERNEL);
2572 if (!link_tbl->table.buf) {
2573 hns_roce_buf_free(hr_dev, link_tbl->buf);
2574 return ERR_PTR(-ENOMEM);
2575 }
2576
2577 return link_tbl;
2578 }
2579
2580 static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2581 struct hns_roce_link_table *tbl)
2582 {
2583 if (tbl->buf) {
2584 u32 size = tbl->buf->npages * sizeof(u64);
2585
2586 dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
2587 tbl->table.map);
2588 }
2589
2590 hns_roce_buf_free(hr_dev, tbl->buf);
2591 }
2592
2593 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2594 {
2595 struct hns_roce_link_table *link_tbl;
2596 int ret;
2597
2598 link_tbl = alloc_link_table_buf(hr_dev);
2599 if (IS_ERR(link_tbl))
2600 return -ENOMEM;
2601
2602 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2603 ret = -EINVAL;
2604 goto err_alloc;
2605 }
2606
2607 config_llm_table(link_tbl->buf, link_tbl->table.buf);
2608 ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
2609 if (ret)
2610 goto err_alloc;
2611
2612 return 0;
2613
2614 err_alloc:
2615 free_link_table_buf(hr_dev, link_tbl);
2616 return ret;
2617 }
2618
2619 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2620 {
2621 struct hns_roce_v2_priv *priv = hr_dev->priv;
2622
2623 free_link_table_buf(hr_dev, &priv->ext_llm);
2624 }
2625
2626 static void free_dip_list(struct hns_roce_dev *hr_dev)
2627 {
2628 struct hns_roce_dip *hr_dip;
2629 struct hns_roce_dip *tmp;
2630 unsigned long flags;
2631
2632 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2633
2634 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2635 list_del(&hr_dip->node);
2636 kfree(hr_dip);
2637 }
2638
2639 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
2640 }
2641
2642 static void free_mr_exit(struct hns_roce_dev *hr_dev)
2643 {
2644 struct hns_roce_v2_priv *priv = hr_dev->priv;
2645 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2646 int ret;
2647 int i;
2648
2649 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2650 if (free_mr->rsv_qp[i]) {
2651 ret = ib_destroy_qp(free_mr->rsv_qp[i]);
2652 if (ret)
2653 ibdev_err(&hr_dev->ib_dev,
2654 "failed to destroy qp in free mr.\n");
2655
2656 free_mr->rsv_qp[i] = NULL;
2657 }
2658 }
2659
2660 if (free_mr->rsv_cq) {
2661 ib_destroy_cq(free_mr->rsv_cq);
2662 free_mr->rsv_cq = NULL;
2663 }
2664
2665 if (free_mr->rsv_pd) {
2666 ib_dealloc_pd(free_mr->rsv_pd);
2667 free_mr->rsv_pd = NULL;
2668 }
2669 }
2670
2671 static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
2672 {
2673 struct hns_roce_v2_priv *priv = hr_dev->priv;
2674 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2675 struct ib_device *ibdev = &hr_dev->ib_dev;
2676 struct ib_cq_init_attr cq_init_attr = {};
2677 struct ib_qp_init_attr qp_init_attr = {};
2678 struct ib_pd *pd;
2679 struct ib_cq *cq;
2680 struct ib_qp *qp;
2681 int ret;
2682 int i;
2683
2684 pd = ib_alloc_pd(ibdev, 0);
2685 if (IS_ERR(pd)) {
2686 ibdev_err(ibdev, "failed to create pd for free mr.\n");
2687 return PTR_ERR(pd);
2688 }
2689 free_mr->rsv_pd = pd;
2690
2691 cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
2692 cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_init_attr);
2693 if (IS_ERR(cq)) {
2694 ibdev_err(ibdev, "failed to create cq for free mr.\n");
2695 ret = PTR_ERR(cq);
2696 goto create_failed;
2697 }
2698 free_mr->rsv_cq = cq;
2699
2700 qp_init_attr.qp_type = IB_QPT_RC;
2701 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2702 qp_init_attr.send_cq = free_mr->rsv_cq;
2703 qp_init_attr.recv_cq = free_mr->rsv_cq;
2704 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2705 qp_init_attr.cap.max_send_wr = HNS_ROCE_FREE_MR_USED_SQWQE_NUM;
2706 qp_init_attr.cap.max_send_sge = HNS_ROCE_FREE_MR_USED_SQSGE_NUM;
2707 qp_init_attr.cap.max_recv_wr = HNS_ROCE_FREE_MR_USED_RQWQE_NUM;
2708 qp_init_attr.cap.max_recv_sge = HNS_ROCE_FREE_MR_USED_RQSGE_NUM;
2709
2710 qp = ib_create_qp(free_mr->rsv_pd, &qp_init_attr);
2711 if (IS_ERR(qp)) {
2712 ibdev_err(ibdev, "failed to create qp for free mr.\n");
2713 ret = PTR_ERR(qp);
2714 goto create_failed;
2715 }
2716
2717 free_mr->rsv_qp[i] = qp;
2718 }
2719
2720 return 0;
2721
2722 create_failed:
2723 free_mr_exit(hr_dev);
2724
2725 return ret;
2726 }
2727
2728 static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
2729 struct ib_qp_attr *attr, int sl_num)
2730 {
2731 struct hns_roce_v2_priv *priv = hr_dev->priv;
2732 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2733 struct ib_device *ibdev = &hr_dev->ib_dev;
2734 struct hns_roce_qp *hr_qp;
2735 int loopback;
2736 int mask;
2737 int ret;
2738
2739 hr_qp = to_hr_qp(free_mr->rsv_qp[sl_num]);
2740 hr_qp->free_mr_en = 1;
2741
2742 mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
2743 attr->qp_state = IB_QPS_INIT;
2744 attr->port_num = 1;
2745 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
2746 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
2747 if (ret) {
2748 ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
2749 ret);
2750 return ret;
2751 }
2752
2753 loopback = hr_dev->loop_idc;
2754
2755 hr_dev->loop_idc = 1;
2756
2757 mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
2758 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
2759 attr->qp_state = IB_QPS_RTR;
2760 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2761 attr->path_mtu = IB_MTU_256;
2762 attr->dest_qp_num = hr_qp->qpn;
2763 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2764
2765 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
2766
2767 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
2768 hr_dev->loop_idc = loopback;
2769 if (ret) {
2770 ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
2771 ret);
2772 return ret;
2773 }
2774
2775 mask = IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_RETRY_CNT | IB_QP_TIMEOUT |
2776 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC;
2777 attr->qp_state = IB_QPS_RTS;
2778 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN;
2779 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
2780 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
2781 ret = ib_modify_qp(&hr_qp->ibqp, attr, mask);
2782 if (ret)
2783 ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
2784 ret);
2785
2786 return ret;
2787 }
2788
2789 static int free_mr_modify_qp(struct hns_roce_dev *hr_dev)
2790 {
2791 struct hns_roce_v2_priv *priv = hr_dev->priv;
2792 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
2793 struct ib_qp_attr attr = {};
2794 int ret;
2795 int i;
2796
2797 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
2798 rdma_ah_set_static_rate(&attr.ah_attr, 3);
2799 rdma_ah_set_port_num(&attr.ah_attr, 1);
2800
2801 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
2802 ret = free_mr_modify_rsv_qp(hr_dev, &attr, i);
2803 if (ret)
2804 return ret;
2805 }
2806
2807 return 0;
2808 }
2809
2810 static int free_mr_init(struct hns_roce_dev *hr_dev)
2811 {
2812 int ret;
2813
2814 ret = free_mr_alloc_res(hr_dev);
2815 if (ret)
2816 return ret;
2817
2818 ret = free_mr_modify_qp(hr_dev);
2819 if (ret)
2820 goto err_modify_qp;
2821
2822 return 0;
2823
2824 err_modify_qp:
2825 free_mr_exit(hr_dev);
2826
2827 return ret;
2828 }
2829
2830 static int get_hem_table(struct hns_roce_dev *hr_dev)
2831 {
2832 unsigned int qpc_count;
2833 unsigned int cqc_count;
2834 unsigned int gmv_count;
2835 int ret;
2836 int i;
2837
2838
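/* Get HEM pages for every GMV (GID/MAC/VLAN) table entry. */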
2839 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2840 gmv_count++) {
2841 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2842 if (ret)
2843 goto err_gmv_failed;
2844 }
2845
2846 if (hr_dev->is_vf)
2847 return 0;
2848
2849
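/* Get HEM for the QPC timer BT entries (PF only; VFs returned above). */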
2850 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2851 qpc_count++) {
2852 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2853 qpc_count);
2854 if (ret) {
2855 dev_err(hr_dev->dev, "failed to get qpc timer table, ret = %d.\n", ret);
2856 goto err_qpc_timer_failed;
2857 }
2858 }
2859
2860
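/* Get HEM for the CQC timer BT entries. */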
2861 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2862 cqc_count++) {
2863 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2864 cqc_count);
2865 if (ret) {
2866 dev_err(hr_dev->dev, "failed to get cqc timer table, ret = %d.\n", ret);
2867 goto err_cqc_timer_failed;
2868 }
2869 }
2870
2871 return 0;
2872
2873 err_cqc_timer_failed:
2874 for (i = 0; i < cqc_count; i++)
2875 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2876
2877 err_qpc_timer_failed:
2878 for (i = 0; i < qpc_count; i++)
2879 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2880
2881 err_gmv_failed:
2882 for (i = 0; i < gmv_count; i++)
2883 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2884
2885 return ret;
2886 }
2887
2888 static void put_hem_table(struct hns_roce_dev *hr_dev)
2889 {
2890 int i;
2891
2892 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2893 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2894
2895 if (hr_dev->is_vf)
2896 return;
2897
2898 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2899 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2900
2901 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2902 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2903 }
2904
2905 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2906 {
2907 int ret;
2908
2909
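/* Clear the extended doorbell list info in hardware before the tables are set up. */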
2910 ret = hns_roce_clear_extdb_list_info(hr_dev);
2911 if (ret)
2912 return ret;
2913
2914 ret = get_hem_table(hr_dev);
2915 if (ret)
2916 return ret;
2917
2918 if (hr_dev->is_vf)
2919 return 0;
2920
2921 ret = hns_roce_init_link_table(hr_dev);
2922 if (ret) {
2923 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
2924 goto err_llm_init_failed;
2925 }
2926
2927 return 0;
2928
2929 err_llm_init_failed:
2930 put_hem_table(hr_dev);
2931
2932 return ret;
2933 }
2934
2935 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2936 {
2937 hns_roce_function_clear(hr_dev);
2938
2939 if (!hr_dev->is_vf)
2940 hns_roce_free_link_table(hr_dev);
2941
2942 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
2943 free_dip_list(hr_dev);
2944 }
2945
2946 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev,
2947 struct hns_roce_mbox_msg *mbox_msg)
2948 {
2949 struct hns_roce_cmq_desc desc;
2950 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2951
2952 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2953
2954 mb->in_param_l = cpu_to_le32(mbox_msg->in_param);
2955 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32);
2956 mb->out_param_l = cpu_to_le32(mbox_msg->out_param);
2957 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32);
2958 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd);
2959 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 |
2960 mbox_msg->token);
2961
2962 return hns_roce_cmq_send(hr_dev, &desc, 1);
2963 }
2964
2965 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
2966 u8 *complete_status)
2967 {
2968 struct hns_roce_mbox_status *mb_st;
2969 struct hns_roce_cmq_desc desc;
2970 unsigned long end;
2971 int ret = -EBUSY;
2972 u32 status;
2973 bool busy;
2974
2975 mb_st = (struct hns_roce_mbox_status *)desc.data;
2976 end = msecs_to_jiffies(timeout) + jiffies;
2977 while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
2978 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
2979 return -EIO;
2980
2981 status = 0;
2982 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
2983 true);
2984 ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
2985 if (!ret) {
2986 status = le32_to_cpu(mb_st->mb_status_hw_run);
2987
2988 if (!(status & MB_ST_HW_RUN_M))
2989 break;
2990 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2991 break;
2992 }
2993
2994 if (time_after(jiffies, end)) {
2995 dev_err_ratelimited(hr_dev->dev,
2996 "failed to wait mbox status 0x%x\n",
2997 status);
2998 return -ETIMEDOUT;
2999 }
3000
3001 cond_resched();
3002 ret = -EBUSY;
3003 }
3004
3005 if (!ret) {
3006 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
3007 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
3008
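/* The mailbox is unavailable (e.g. a reset is in progress); report the command as completed. */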
3009 ret = 0;
3010 *complete_status = MB_ST_COMPLETE_M;
3011 }
3012
3013 return ret;
3014 }
3015
3016 static int v2_post_mbox(struct hns_roce_dev *hr_dev,
3017 struct hns_roce_mbox_msg *mbox_msg)
3018 {
3019 u8 status = 0;
3020 int ret;
3021
3022
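/* Wait until the previous mailbox command has finished before posting a new one. */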
3023 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
3024 &status);
3025 if (unlikely(ret)) {
3026 dev_err_ratelimited(hr_dev->dev,
3027 "failed to check post mbox status = 0x%x, ret = %d.\n",
3028 status, ret);
3029 return ret;
3030 }
3031
3032
3033 ret = hns_roce_mbox_post(hr_dev, mbox_msg);
3034 if (ret)
3035 dev_err_ratelimited(hr_dev->dev,
3036 "failed to post mailbox, ret = %d.\n", ret);
3037
3038 return ret;
3039 }
3040
3041 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev)
3042 {
3043 u8 status = 0;
3044 int ret;
3045
3046 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS,
3047 &status);
3048 if (!ret) {
3049 if (status != MB_ST_COMPLETE_SUCC)
3050 return -EBUSY;
3051 } else {
3052 dev_err_ratelimited(hr_dev->dev,
3053 "failed to check mbox status = 0x%x, ret = %d.\n",
3054 status, ret);
3055 }
3056
3057 return ret;
3058 }
3059
3060 static void copy_gid(void *dest, const union ib_gid *gid)
3061 {
3062 #define GID_SIZE 4
3063 const union ib_gid *src = gid;
3064 __le32 (*p)[GID_SIZE] = dest;
3065 int i;
3066
3067 if (!gid)
3068 src = &zgid;
3069
3070 for (i = 0; i < GID_SIZE; i++)
3071 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
3072 }
3073
3074 static int config_sgid_table(struct hns_roce_dev *hr_dev,
3075 int gid_index, const union ib_gid *gid,
3076 enum hns_roce_sgid_type sgid_type)
3077 {
3078 struct hns_roce_cmq_desc desc;
3079 struct hns_roce_cfg_sgid_tb *sgid_tb =
3080 (struct hns_roce_cfg_sgid_tb *)desc.data;
3081
3082 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
3083
3084 hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index);
3085 hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type);
3086
3087 copy_gid(&sgid_tb->vf_sgid_l, gid);
3088
3089 return hns_roce_cmq_send(hr_dev, &desc, 1);
3090 }
3091
3092 static int config_gmv_table(struct hns_roce_dev *hr_dev,
3093 int gid_index, const union ib_gid *gid,
3094 enum hns_roce_sgid_type sgid_type,
3095 const struct ib_gid_attr *attr)
3096 {
3097 struct hns_roce_cmq_desc desc[2];
3098 struct hns_roce_cfg_gmv_tb_a *tb_a =
3099 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
3100 struct hns_roce_cfg_gmv_tb_b *tb_b =
3101 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
3102
3103 u16 vlan_id = VLAN_CFI_MASK;
3104 u8 mac[ETH_ALEN] = {};
3105 int ret;
3106
3107 if (gid) {
3108 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
3109 if (ret)
3110 return ret;
3111 }
3112
3113 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3114 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
3115
3116 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
3117
3118 copy_gid(&tb_a->vf_sgid_l, gid);
3119
3120 hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type);
3121 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK);
3122 hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id);
3123
3124 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
3125
3126 hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]);
3127 hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index);
3128
3129 return hns_roce_cmq_send(hr_dev, desc, 2);
3130 }
3131
3132 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
3133 const union ib_gid *gid,
3134 const struct ib_gid_attr *attr)
3135 {
3136 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
3137 int ret;
3138
3139 if (gid) {
3140 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3141 if (ipv6_addr_v4mapped((void *)gid))
3142 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
3143 else
3144 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
3145 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
3146 sgid_type = GID_TYPE_FLAG_ROCE_V1;
3147 }
3148 }
3149
3150 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
3151 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
3152 else
3153 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
3154
3155 if (ret)
3156 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
3157 ret);
3158
3159 return ret;
3160 }
3161
3162 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3163 const u8 *addr)
3164 {
3165 struct hns_roce_cmq_desc desc;
3166 struct hns_roce_cfg_smac_tb *smac_tb =
3167 (struct hns_roce_cfg_smac_tb *)desc.data;
3168 u16 reg_smac_h;
3169 u32 reg_smac_l;
3170
3171 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3172
3173 reg_smac_l = *(u32 *)(&addr[0]);
3174 reg_smac_h = *(u16 *)(&addr[4]);
3175
3176 hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port);
3177 hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h);
3178 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3179
3180 return hns_roce_cmq_send(hr_dev, &desc, 1);
3181 }
3182
3183 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3184 struct hns_roce_v2_mpt_entry *mpt_entry,
3185 struct hns_roce_mr *mr)
3186 {
3187 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3188 struct ib_device *ibdev = &hr_dev->ib_dev;
3189 dma_addr_t pbl_ba;
3190 int i, count;
3191
3192 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3193 ARRAY_SIZE(pages), &pbl_ba);
3194 if (count < 1) {
3195 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3196 count);
3197 return -ENOBUFS;
3198 }
3199
3200
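/* PBL page addresses are written to the MPT shifted right by 6 bits. */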
3201 for (i = 0; i < count; i++)
3202 pages[i] >>= 6;
3203
3204 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3205 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3206 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3207
3208 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3209 hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
3210
3211 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3212 hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
3213 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3214 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3215
3216 return 0;
3217 }
3218
3219 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3220 void *mb_buf, struct hns_roce_mr *mr)
3221 {
3222 struct hns_roce_v2_mpt_entry *mpt_entry;
3223
3224 mpt_entry = mb_buf;
3225 memset(mpt_entry, 0, sizeof(*mpt_entry));
3226
3227 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3228 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3229 hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3230
3231 hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
3232 mr->access & IB_ACCESS_MW_BIND);
3233 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3234 mr->access & IB_ACCESS_REMOTE_ATOMIC);
3235 hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3236 mr->access & IB_ACCESS_REMOTE_READ);
3237 hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3238 mr->access & IB_ACCESS_REMOTE_WRITE);
3239 hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3240 mr->access & IB_ACCESS_LOCAL_WRITE);
3241
3242 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3243 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3244 mpt_entry->lkey = cpu_to_le32(mr->key);
3245 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3246 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3247
3248 if (mr->type != MR_TYPE_MR)
3249 hr_reg_enable(mpt_entry, MPT_PA);
3250
3251 if (mr->type == MR_TYPE_DMA)
3252 return 0;
3253
3254 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3255 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3256
3257 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3258 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3259 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3260
3261 return set_mtpt_pbl(hr_dev, mpt_entry, mr);
3262 }
3263
3264 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3265 struct hns_roce_mr *mr, int flags,
3266 void *mb_buf)
3267 {
3268 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3269 u32 mr_access_flags = mr->access;
3270 int ret = 0;
3271
3272 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3273 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3274
3275 if (flags & IB_MR_REREG_ACCESS) {
3276 hr_reg_write(mpt_entry, MPT_BIND_EN,
3277 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3278 hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
3279 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3280 hr_reg_write(mpt_entry, MPT_RR_EN,
3281 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3282 hr_reg_write(mpt_entry, MPT_RW_EN,
3283 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3284 hr_reg_write(mpt_entry, MPT_LW_EN,
3285 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3286 }
3287
3288 if (flags & IB_MR_REREG_TRANS) {
3289 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3290 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3291 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3292 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3293
3294 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3295 }
3296
3297 return ret;
3298 }
3299
3300 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3301 void *mb_buf, struct hns_roce_mr *mr)
3302 {
3303 struct ib_device *ibdev = &hr_dev->ib_dev;
3304 struct hns_roce_v2_mpt_entry *mpt_entry;
3305 dma_addr_t pbl_ba = 0;
3306
3307 mpt_entry = mb_buf;
3308 memset(mpt_entry, 0, sizeof(*mpt_entry));
3309
3310 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3311 ibdev_err(ibdev, "failed to find frmr mtr.\n");
3312 return -ENOBUFS;
3313 }
3314
3315 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3316 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3317
3318 hr_reg_enable(mpt_entry, MPT_RA_EN);
3319 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3320 hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3321
3322 hr_reg_enable(mpt_entry, MPT_FRE);
3323 hr_reg_clear(mpt_entry, MPT_MR_MW);
3324 hr_reg_enable(mpt_entry, MPT_BPD);
3325 hr_reg_clear(mpt_entry, MPT_PA);
3326
3327 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
3328 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3329 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3330 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3331 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3332
3333 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3334
3335 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3336 hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
3337
3338 return 0;
3339 }
3340
3341 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3342 {
3343 struct hns_roce_v2_mpt_entry *mpt_entry;
3344
3345 mpt_entry = mb_buf;
3346 memset(mpt_entry, 0, sizeof(*mpt_entry));
3347
3348 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
3349 hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
3350
3351 hr_reg_enable(mpt_entry, MPT_R_INV_EN);
3352 hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3353 hr_reg_enable(mpt_entry, MPT_LW_EN);
3354
3355 hr_reg_enable(mpt_entry, MPT_MR_MW);
3356 hr_reg_enable(mpt_entry, MPT_BPD);
3357 hr_reg_clear(mpt_entry, MPT_PA);
3358 hr_reg_write(mpt_entry, MPT_BQP,
3359 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3360
3361 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3362
3363 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
3364 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3365 mw->pbl_hop_num);
3366 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3367 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3368 hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
3369 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3370
3371 return 0;
3372 }
3373
3374 static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
3375 {
3376 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
3377 struct ib_device *ibdev = &hr_dev->ib_dev;
3378 const struct ib_send_wr *bad_wr;
3379 struct ib_rdma_wr rdma_wr = {};
3380 struct ib_send_wr *send_wr;
3381 int ret;
3382
3383 send_wr = &rdma_wr.wr;
3384 send_wr->opcode = IB_WR_RDMA_WRITE;
3385
3386 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr);
3387 if (ret) {
3388 ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n",
3389 ret);
3390 return ret;
3391 }
3392
3393 return 0;
3394 }
3395
3396 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3397 struct ib_wc *wc);
3398
3399 static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev)
3400 {
3401 struct hns_roce_v2_priv *priv = hr_dev->priv;
3402 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr;
3403 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)];
3404 struct ib_device *ibdev = &hr_dev->ib_dev;
3405 struct hns_roce_qp *hr_qp;
3406 unsigned long end;
3407 int cqe_cnt = 0;
3408 int npolled;
3409 int ret;
3410 int i;
3411
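/* Skip the free-mr loopback WQEs when the device is resetting, still
 * initializing, or already uninitialized.
 */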
3416 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
3417 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT ||
3418 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT)
3419 return;
3420
3421 mutex_lock(&free_mr->mutex);
3422
3423 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) {
3424 hr_qp = to_hr_qp(free_mr->rsv_qp[i]);
3425
3426 ret = free_mr_post_send_lp_wqe(hr_qp);
3427 if (ret) {
3428 ibdev_err(ibdev,
3429 "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n",
3430 hr_qp->qpn, ret);
3431 break;
3432 }
3433
3434 cqe_cnt++;
3435 }
3436
3437 end = msecs_to_jiffies(HNS_ROCE_V2_FREE_MR_TIMEOUT) + jiffies;
3438 while (cqe_cnt) {
3439 npolled = hns_roce_v2_poll_cq(free_mr->rsv_cq, cqe_cnt, wc);
3440 if (npolled < 0) {
3441 ibdev_err(ibdev,
3442 "failed to poll cqe for free mr, remain %d cqe.\n",
3443 cqe_cnt);
3444 goto out;
3445 }
3446
3447 if (time_after(jiffies, end)) {
3448 ibdev_err(ibdev,
3449 "failed to poll cqe for free mr and timeout, remain %d cqe.\n",
3450 cqe_cnt);
3451 goto out;
3452 }
3453 cqe_cnt -= npolled;
3454 }
3455
3456 out:
3457 mutex_unlock(&free_mr->mutex);
3458 }
3459
3460 static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)
3461 {
3462 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
3463 free_mr_send_cmd_to_hw(hr_dev);
3464 }
3465
3466 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3467 {
3468 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3469 }
3470
3471 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3472 {
3473 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3474
3475
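/* The CQE is owned by software when its owner bit differs from the wrap bit of the requested index. */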
3476 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
3477 NULL;
3478 }
3479
3480 static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3481 struct hns_roce_cq *hr_cq)
3482 {
3483 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3484 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3485 } else {
3486 struct hns_roce_v2_db cq_db = {};
3487
3488 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3489 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
3490 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3491 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
3492
3493 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3494 }
3495 }
3496
3497 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3498 struct hns_roce_srq *srq)
3499 {
3500 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3501 struct hns_roce_v2_cqe *cqe, *dest;
3502 u32 prod_index;
3503 int nfreed = 0;
3504 int wqe_index;
3505 u8 owner_bit;
3506
3507 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3508 ++prod_index) {
3509 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3510 break;
3511 }
3512
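/* Walk the CQ backwards from the newest software-owned CQE, discarding
 * entries that belong to the given QP and compacting the remaining CQEs
 * toward the producer index while preserving each destination entry's
 * owner bit.
 */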
3517 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3518 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3519 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
3520 if (srq && hr_reg_read(cqe, CQE_S_R)) {
3521 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
3522 hns_roce_free_srq_wqe(srq, wqe_index);
3523 }
3524 ++nfreed;
3525 } else if (nfreed) {
3526 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3527 hr_cq->ib_cq.cqe);
3528 owner_bit = hr_reg_read(dest, CQE_OWNER);
3529 memcpy(dest, cqe, hr_cq->cqe_size);
3530 hr_reg_write(dest, CQE_OWNER, owner_bit);
3531 }
3532 }
3533
3534 if (nfreed) {
3535 hr_cq->cons_index += nfreed;
3536 update_cq_db(hr_dev, hr_cq);
3537 }
3538 }
3539
3540 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3541 struct hns_roce_srq *srq)
3542 {
3543 spin_lock_irq(&hr_cq->lock);
3544 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3545 spin_unlock_irq(&hr_cq->lock);
3546 }
3547
3548 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3549 struct hns_roce_cq *hr_cq, void *mb_buf,
3550 u64 *mtts, dma_addr_t dma_handle)
3551 {
3552 struct hns_roce_v2_cq_context *cq_context;
3553
3554 cq_context = mb_buf;
3555 memset(cq_context, 0, sizeof(*cq_context));
3556
3557 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
3558 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
3559 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
3560 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
3561 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
3562
3563 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
3564 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
3565
3566 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3567 hr_reg_enable(cq_context, CQC_STASH);
3568
3569 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
3570 to_hr_hw_page_addr(mtts[0]));
3571 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
3572 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3573 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
3574 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3575 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3576 to_hr_hw_page_addr(mtts[1]));
3577 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3578 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3579 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3580 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3581 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3582 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3583 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3584 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3585 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3586 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3587 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3588 ((u32)hr_cq->db.dma) >> 1);
3589 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3590 hr_cq->db.dma >> 32);
3591 hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3592 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3593 hr_reg_write(cq_context, CQC_CQ_PERIOD,
3594 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3595 }
3596
3597 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3598 enum ib_cq_notify_flags flags)
3599 {
3600 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3601 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3602 struct hns_roce_v2_db cq_db = {};
3603 u32 notify_flag;
3604
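/* Select the doorbell notification type based on whether only solicited completions were requested. */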
3609 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3610 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3611
3612 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3613 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3614 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3615 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3616 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3617
3618 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3619
3620 return 0;
3621 }
3622
3623 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3624 struct hns_roce_qp *qp,
3625 struct ib_wc *wc)
3626 {
3627 struct hns_roce_rinl_sge *sge_list;
3628 u32 wr_num, wr_cnt, sge_num;
3629 u32 sge_cnt, data_len, size;
3630 void *wqe_buf;
3631
3632 wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
3633 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
3634
3635 sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3636 sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3637 wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
3638 data_len = wc->byte_len;
3639
3640 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3641 size = min(sge_list[sge_cnt].len, data_len);
3642 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3643
3644 data_len -= size;
3645 wqe_buf += size;
3646 }
3647
3648 if (unlikely(data_len)) {
3649 wc->status = IB_WC_LOC_LEN_ERR;
3650 return -EAGAIN;
3651 }
3652
3653 return 0;
3654 }
3655
3656 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3657 int num_entries, struct ib_wc *wc)
3658 {
3659 unsigned int left;
3660 int npolled = 0;
3661
3662 left = wq->head - wq->tail;
3663 if (left == 0)
3664 return 0;
3665
3666 left = min_t(unsigned int, (unsigned int)num_entries, left);
3667 while (npolled < left) {
3668 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3669 wc->status = IB_WC_WR_FLUSH_ERR;
3670 wc->vendor_err = 0;
3671 wc->qp = &hr_qp->ibqp;
3672
3673 wq->tail++;
3674 wc++;
3675 npolled++;
3676 }
3677
3678 return npolled;
3679 }
3680
3681 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3682 struct ib_wc *wc)
3683 {
3684 struct hns_roce_qp *hr_qp;
3685 int npolled = 0;
3686
3687 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3688 npolled += sw_comp(hr_qp, &hr_qp->sq,
3689 num_entries - npolled, wc + npolled);
3690 if (npolled >= num_entries)
3691 goto out;
3692 }
3693
3694 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3695 npolled += sw_comp(hr_qp, &hr_qp->rq,
3696 num_entries - npolled, wc + npolled);
3697 if (npolled >= num_entries)
3698 goto out;
3699 }
3700
3701 out:
3702 return npolled;
3703 }
3704
3705 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3706 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3707 struct ib_wc *wc)
3708 {
3709 static const struct {
3710 u32 cqe_status;
3711 enum ib_wc_status wc_status;
3712 } map[] = {
3713 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3714 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3715 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3716 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3717 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3718 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3719 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3720 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3721 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3722 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3723 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3724 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3725 IB_WC_RETRY_EXC_ERR },
3726 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3727 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3728 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3729 };
3730
3731 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3732 int i;
3733
3734 wc->status = IB_WC_GENERAL_ERR;
3735 for (i = 0; i < ARRAY_SIZE(map); i++)
3736 if (cqe_status == map[i].cqe_status) {
3737 wc->status = map[i].wc_status;
3738 break;
3739 }
3740
3741 if (likely(wc->status == IB_WC_SUCCESS ||
3742 wc->status == IB_WC_WR_FLUSH_ERR))
3743 return;
3744
3745 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3746 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3747 cq->cqe_size, false);
3748 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3749
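/* A GENERAL_ERR completion is not followed by a flush; any other error
 * status triggers flush_cqe() on the QP.
 */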
3755 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3756 return;
3757
3758 flush_cqe(hr_dev, qp);
3759 }
3760
3761 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3762 struct hns_roce_qp **cur_qp)
3763 {
3764 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3765 struct hns_roce_qp *hr_qp = *cur_qp;
3766 u32 qpn;
3767
3768 qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3769
3770 if (!hr_qp || qpn != hr_qp->qpn) {
3771 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3772 if (unlikely(!hr_qp)) {
3773 ibdev_err(&hr_dev->ib_dev,
3774 "CQ %06lx with entry for unknown QPN %06x\n",
3775 hr_cq->cqn, qpn);
3776 return -EINVAL;
3777 }
3778 *cur_qp = hr_qp;
3779 }
3780
3781 return 0;
3782 }
3783
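/* Entries are stored as (1 + IB opcode) so that a zero entry marks an
 * unmapped hardware opcode; to_ib_wc_send_op() and to_ib_wc_recv_op()
 * undo the offset and return -EINVAL for unmapped values.
 */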
3791 #define HR_WC_OP_MAP(hr_key, ib_key) \
3792 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3793
3794 static const u32 wc_send_op_map[] = {
3795 HR_WC_OP_MAP(SEND, SEND),
3796 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3797 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3798 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3799 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3800 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3801 HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
3802 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3803 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3804 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3805 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3806 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3807 HR_WC_OP_MAP(BIND_MW, REG_MR),
3808 };
3809
3810 static int to_ib_wc_send_op(u32 hr_opcode)
3811 {
3812 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3813 return -EINVAL;
3814
3815 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3816 -EINVAL;
3817 }
3818
3819 static const u32 wc_recv_op_map[] = {
3820 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
3821 HR_WC_OP_MAP(SEND, RECV),
3822 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
3823 HR_WC_OP_MAP(SEND_WITH_INV, RECV),
3824 };
3825
3826 static int to_ib_wc_recv_op(u32 hr_opcode)
3827 {
3828 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3829 return -EINVAL;
3830
3831 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3832 -EINVAL;
3833 }
3834
3835 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3836 {
3837 u32 hr_opcode;
3838 int ib_opcode;
3839
3840 wc->wc_flags = 0;
3841
3842 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3843 switch (hr_opcode) {
3844 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3845 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3846 break;
3847 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3848 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3849 wc->wc_flags |= IB_WC_WITH_IMM;
3850 break;
3851 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3852 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3853 break;
3854 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3855 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3856 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3857 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3858 wc->byte_len = 8;
3859 break;
3860 default:
3861 break;
3862 }
3863
3864 ib_opcode = to_ib_wc_send_op(hr_opcode);
3865 if (ib_opcode < 0)
3866 wc->status = IB_WC_GENERAL_ERR;
3867 else
3868 wc->opcode = ib_opcode;
3869 }
3870
3871 static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
3872 struct hns_roce_v2_cqe *cqe)
3873 {
3874 return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
3875 (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
3876 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3877 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3878 hr_reg_read(cqe, CQE_RQ_INLINE);
3879 }
3880
3881 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3882 {
3883 struct hns_roce_qp *qp = to_hr_qp(wc->qp);
3884 u32 hr_opcode;
3885 int ib_opcode;
3886 int ret;
3887
3888 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3889
3890 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3891 switch (hr_opcode) {
3892 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3893 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3894 wc->wc_flags = IB_WC_WITH_IMM;
3895 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3896 break;
3897 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3898 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3899 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3900 break;
3901 default:
3902 wc->wc_flags = 0;
3903 }
3904
3905 ib_opcode = to_ib_wc_recv_op(hr_opcode);
3906 if (ib_opcode < 0)
3907 wc->status = IB_WC_GENERAL_ERR;
3908 else
3909 wc->opcode = ib_opcode;
3910
3911 if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
3912 ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
3913 if (unlikely(ret))
3914 return ret;
3915 }
3916
3917 wc->sl = hr_reg_read(cqe, CQE_SL);
3918 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
3919 wc->slid = 0;
3920 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
3921 wc->port_num = hr_reg_read(cqe, CQE_PORTN);
3922 wc->pkey_index = 0;
3923
3924 if (hr_reg_read(cqe, CQE_VID_VLD)) {
3925 wc->vlan_id = hr_reg_read(cqe, CQE_VID);
3926 wc->wc_flags |= IB_WC_WITH_VLAN;
3927 } else {
3928 wc->vlan_id = 0xffff;
3929 }
3930
3931 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
3932
3933 return 0;
3934 }
3935
3936 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3937 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3938 {
3939 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3940 struct hns_roce_qp *qp = *cur_qp;
3941 struct hns_roce_srq *srq = NULL;
3942 struct hns_roce_v2_cqe *cqe;
3943 struct hns_roce_wq *wq;
3944 int is_send;
3945 u16 wqe_idx;
3946 int ret;
3947
3948 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3949 if (!cqe)
3950 return -EAGAIN;
3951
3952 ++hr_cq->cons_index;
3953
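/* Do not read the CQE contents before its validity has been checked. */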
3954 rmb();
3955
3956 ret = get_cur_qp(hr_cq, cqe, &qp);
3957 if (ret)
3958 return ret;
3959
3960 wc->qp = &qp->ibqp;
3961 wc->vendor_err = 0;
3962
3963 wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
3964
3965 is_send = !hr_reg_read(cqe, CQE_S_R);
3966 if (is_send) {
3967 wq = &qp->sq;
3968
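/* If not every send WQE is signalled, one CQE may complete several
 * WQEs: catch the tail up to the WQE index reported by this CQE.
 */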
3972 if (qp->sq_signal_bits)
3973 wq->tail += (wqe_idx - (u16)wq->tail) &
3974 (wq->wqe_cnt - 1);
3975
3976 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3977 ++wq->tail;
3978
3979 fill_send_wc(wc, cqe);
3980 } else {
3981 if (qp->ibqp.srq) {
3982 srq = to_hr_srq(qp->ibqp.srq);
3983 wc->wr_id = srq->wrid[wqe_idx];
3984 hns_roce_free_srq_wqe(srq, wqe_idx);
3985 } else {
3986 wq = &qp->rq;
3987 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3988 ++wq->tail;
3989 }
3990
3991 ret = fill_recv_wc(wc, cqe);
3992 }
3993
3994 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3995 if (unlikely(wc->status != IB_WC_SUCCESS))
3996 return 0;
3997
3998 return ret;
3999 }
4000
4001 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
4002 struct ib_wc *wc)
4003 {
4004 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
4005 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
4006 struct hns_roce_qp *cur_qp = NULL;
4007 unsigned long flags;
4008 int npolled;
4009
4010 spin_lock_irqsave(&hr_cq->lock, flags);
4011
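/* Once the device state is UNINIT the hardware returns no more CQEs,
 * so complete the remaining WQEs in software with a flush error.
 */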
4019 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
4020 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
4021 goto out;
4022 }
4023
4024 for (npolled = 0; npolled < num_entries; ++npolled) {
4025 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
4026 break;
4027 }
4028
4029 if (npolled)
4030 update_cq_db(hr_dev, hr_cq);
4031
4032 out:
4033 spin_unlock_irqrestore(&hr_cq->lock, flags);
4034
4035 return npolled;
4036 }
4037
4038 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
4039 u32 step_idx, u8 *mbox_cmd)
4040 {
4041 u8 cmd;
4042
4043 switch (type) {
4044 case HEM_TYPE_QPC:
4045 cmd = HNS_ROCE_CMD_WRITE_QPC_BT0;
4046 break;
4047 case HEM_TYPE_MTPT:
4048 cmd = HNS_ROCE_CMD_WRITE_MPT_BT0;
4049 break;
4050 case HEM_TYPE_CQC:
4051 cmd = HNS_ROCE_CMD_WRITE_CQC_BT0;
4052 break;
4053 case HEM_TYPE_SRQC:
4054 cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0;
4055 break;
4056 case HEM_TYPE_SCCC:
4057 cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0;
4058 break;
4059 case HEM_TYPE_QPC_TIMER:
4060 cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
4061 break;
4062 case HEM_TYPE_CQC_TIMER:
4063 cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
4064 break;
4065 default:
4066 dev_warn(hr_dev->dev, "unsupported hem type %u.\n", type);
4067 return -EINVAL;
4068 }
4069
4070 *mbox_cmd = cmd + step_idx;
4071
4072 return 0;
4073 }
4074
4075 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
4076 dma_addr_t base_addr)
4077 {
4078 struct hns_roce_cmq_desc desc;
4079 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
4080 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
4081 u64 addr = to_hr_hw_page_addr(base_addr);
4082
4083 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
4084
4085 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
4086 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
4087 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
4088
4089 return hns_roce_cmq_send(hr_dev, &desc, 1);
4090 }
4091
4092 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
4093 dma_addr_t base_addr, u32 hem_type, u32 step_idx)
4094 {
4095 int ret;
4096 u8 cmd;
4097
4098 if (unlikely(hem_type == HEM_TYPE_GMV))
4099 return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
4100
4101 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
4102 return 0;
4103
4104 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
4105 if (ret < 0)
4106 return ret;
4107
4108 return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
4109 }
4110
4111 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
4112 struct hns_roce_hem_table *table, int obj,
4113 u32 step_idx)
4114 {
4115 struct hns_roce_hem_iter iter;
4116 struct hns_roce_hem_mhop mhop;
4117 struct hns_roce_hem *hem;
4118 unsigned long mhop_obj = obj;
4119 int i, j, k;
4120 int ret = 0;
4121 u64 hem_idx = 0;
4122 u64 l1_idx = 0;
4123 u64 bt_ba = 0;
4124 u32 chunk_ba_num;
4125 u32 hop_num;
4126
4127 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4128 return 0;
4129
4130 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
4131 i = mhop.l0_idx;
4132 j = mhop.l1_idx;
4133 k = mhop.l2_idx;
4134 hop_num = mhop.hop_num;
4135 chunk_ba_num = mhop.bt_chunk_size / 8;
4136
4137 if (hop_num == 2) {
4138 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
4139 k;
4140 l1_idx = i * chunk_ba_num + j;
4141 } else if (hop_num == 1) {
4142 hem_idx = i * chunk_ba_num + j;
4143 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
4144 hem_idx = i;
4145 }
4146
4147 if (table->type == HEM_TYPE_SCCC)
4148 obj = mhop.l0_idx;
4149
4150 if (check_whether_last_step(hop_num, step_idx)) {
4151 hem = table->hem[hem_idx];
4152 for (hns_roce_hem_first(hem, &iter);
4153 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
4154 bt_ba = hns_roce_hem_addr(&iter);
4155 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
4156 step_idx);
4157 }
4158 } else {
4159 if (step_idx == 0)
4160 bt_ba = table->bt_l0_dma_addr[i];
4161 else if (step_idx == 1 && hop_num == 2)
4162 bt_ba = table->bt_l1_dma_addr[l1_idx];
4163
4164 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
4165 }
4166
4167 return ret;
4168 }
4169
4170 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
4171 struct hns_roce_hem_table *table,
4172 int tag, u32 step_idx)
4173 {
4174 struct hns_roce_cmd_mailbox *mailbox;
4175 struct device *dev = hr_dev->dev;
4176 u8 cmd = 0xff;
4177 int ret;
4178
4179 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
4180 return 0;
4181
4182 switch (table->type) {
4183 case HEM_TYPE_QPC:
4184 cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0;
4185 break;
4186 case HEM_TYPE_MTPT:
4187 cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0;
4188 break;
4189 case HEM_TYPE_CQC:
4190 cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0;
4191 break;
4192 case HEM_TYPE_SRQC:
4193 cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
4194 break;
4195 case HEM_TYPE_SCCC:
4196 case HEM_TYPE_QPC_TIMER:
4197 case HEM_TYPE_CQC_TIMER:
4198 case HEM_TYPE_GMV:
4199 return 0;
4200 default:
4201 dev_warn(dev, "table type %u is not destroyed by mailbox.\n",
4202 table->type);
4203 return 0;
4204 }
4205
4206 cmd += step_idx;
4207
4208 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4209 if (IS_ERR(mailbox))
4210 return PTR_ERR(mailbox);
4211
4212 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag);
4213
4214 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4215 return ret;
4216 }
4217
4218 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4219 struct hns_roce_v2_qp_context *context,
4220 struct hns_roce_v2_qp_context *qpc_mask,
4221 struct hns_roce_qp *hr_qp)
4222 {
4223 struct hns_roce_cmd_mailbox *mailbox;
4224 int qpc_size;
4225 int ret;
4226
4227 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4228 if (IS_ERR(mailbox))
4229 return PTR_ERR(mailbox);
4230
4231
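/* The mailbox buffer carries the new context followed by the modify
 * mask, each caps.qpc_sz bytes long.
 */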
4232 qpc_size = hr_dev->caps.qpc_sz;
4233 memcpy(mailbox->buf, context, qpc_size);
4234 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4235
4236 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
4237 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn);
4238
4239 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4240
4241 return ret;
4242 }
4243
4244 static void set_access_flags(struct hns_roce_qp *hr_qp,
4245 struct hns_roce_v2_qp_context *context,
4246 struct hns_roce_v2_qp_context *qpc_mask,
4247 const struct ib_qp_attr *attr, int attr_mask)
4248 {
4249 u8 dest_rd_atomic;
4250 u32 access_flags;
4251
4252 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4253 attr->max_dest_rd_atomic : hr_qp->resp_depth;
4254
4255 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4256 attr->qp_access_flags : hr_qp->atomic_rd_en;
4257
4258 if (!dest_rd_atomic)
4259 access_flags &= IB_ACCESS_REMOTE_WRITE;
4260
4261 hr_reg_write_bool(context, QPC_RRE,
4262 access_flags & IB_ACCESS_REMOTE_READ);
4263 hr_reg_clear(qpc_mask, QPC_RRE);
4264
4265 hr_reg_write_bool(context, QPC_RWE,
4266 access_flags & IB_ACCESS_REMOTE_WRITE);
4267 hr_reg_clear(qpc_mask, QPC_RWE);
4268
4269 hr_reg_write_bool(context, QPC_ATE,
4270 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4271 hr_reg_clear(qpc_mask, QPC_ATE);
4272 hr_reg_write_bool(context, QPC_EXT_ATE,
4273 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4274 hr_reg_clear(qpc_mask, QPC_EXT_ATE);
4275 }
4276
4277 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4278 struct hns_roce_v2_qp_context *context,
4279 struct hns_roce_v2_qp_context *qpc_mask)
4280 {
4281 hr_reg_write(context, QPC_SGE_SHIFT,
4282 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4283 hr_qp->sge.sge_shift));
4284
4285 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
4286
4287 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
4288 }
4289
4290 static inline int get_cqn(struct ib_cq *ib_cq)
4291 {
4292 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4293 }
4294
4295 static inline int get_pdn(struct ib_pd *ib_pd)
4296 {
4297 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4298 }
4299
4300 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4301 const struct ib_qp_attr *attr,
4302 int attr_mask,
4303 struct hns_roce_v2_qp_context *context,
4304 struct hns_roce_v2_qp_context *qpc_mask)
4305 {
4306 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4307 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4308
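/* For RESET to INIT the caller zeroes the whole mask, so every field
 * takes the value written into the context; only the context is filled
 * in here.
 */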
4315 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4316
4317 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4318
4319 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
4320
4321 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
4322
4323
4324 hr_reg_write(context, QPC_VLAN_ID, 0xfff);
4325
4326 if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4327 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4328
4329 hr_reg_enable(context, QPC_XRC_QP_TYPE);
4330 }
4331
4332 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4333 hr_reg_enable(context, QPC_RQ_RECORD_EN);
4334
4335 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
4336 hr_reg_enable(context, QPC_OWNER_MODE);
4337
4338 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
4339 lower_32_bits(hr_qp->rdb.dma) >> 1);
4340 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
4341 upper_32_bits(hr_qp->rdb.dma));
4342
4343 if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
4344 hr_reg_write_bool(context, QPC_RQIE,
4345 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
4346
4347 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4348
4349 if (ibqp->srq) {
4350 hr_reg_enable(context, QPC_SRQ_EN);
4351 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4352 }
4353
4354 hr_reg_enable(context, QPC_FRE);
4355
4356 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4357
4358 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4359 return;
4360
4361 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4362 hr_reg_enable(&context->ext, QPCEX_STASH);
4363 }
4364
4365 static void modify_qp_init_to_init(struct ib_qp *ibqp,
4366 const struct ib_qp_attr *attr, int attr_mask,
4367 struct hns_roce_v2_qp_context *context,
4368 struct hns_roce_v2_qp_context *qpc_mask)
4369 {
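/* Unlike the RESET-to-INIT path, the mask starts as all ones here, so
 * the mask bit of each field rewritten below is cleared explicitly.
 */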
4376 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4377 hr_reg_clear(qpc_mask, QPC_TST);
4378
4379 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4380 hr_reg_clear(qpc_mask, QPC_PD);
4381
4382 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4383 hr_reg_clear(qpc_mask, QPC_RX_CQN);
4384
4385 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4386 hr_reg_clear(qpc_mask, QPC_TX_CQN);
4387
4388 if (ibqp->srq) {
4389 hr_reg_enable(context, QPC_SRQ_EN);
4390 hr_reg_clear(qpc_mask, QPC_SRQ_EN);
4391 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4392 hr_reg_clear(qpc_mask, QPC_SRQN);
4393 }
4394 }
4395
4396 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4397 struct hns_roce_qp *hr_qp,
4398 struct hns_roce_v2_qp_context *context,
4399 struct hns_roce_v2_qp_context *qpc_mask)
4400 {
4401 u64 mtts[MTT_MIN_COUNT] = { 0 };
4402 u64 wqe_sge_ba;
4403 int count;
4404
4405
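/* Look up the physical addresses of the RQ WQE buffer and the WQE/SGE
 * base address.
 */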
4406 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4407 MTT_MIN_COUNT, &wqe_sge_ba);
4408 if (hr_qp->rq.wqe_cnt && count < 1) {
4409 ibdev_err(&hr_dev->ib_dev,
4410 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4411 return -EINVAL;
4412 }
4413
4414 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4415 qpc_mask->wqe_sge_ba = 0;
4416
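/* The WQE/SGE base address is programmed in units of 8 bytes, hence
 * the 3-bit shifts above and below.
 */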
4423 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
4424 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
4425
4426 hr_reg_write(context, QPC_SQ_HOP_NUM,
4427 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4428 hr_qp->sq.wqe_cnt));
4429 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
4430
4431 hr_reg_write(context, QPC_SGE_HOP_NUM,
4432 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4433 hr_qp->sge.sge_cnt));
4434 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
4435
4436 hr_reg_write(context, QPC_RQ_HOP_NUM,
4437 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4438 hr_qp->rq.wqe_cnt));
4439
4440 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
4441
4442 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
4443 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4444 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
4445
4446 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
4447 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4448 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
4449
4450 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4451 qpc_mask->rq_cur_blk_addr = 0;
4452
4453 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
4454 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4455 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
4456
4457 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4458 qpc_mask->rq_nxt_blk_addr = 0;
4459
4460 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
4461 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4462 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
4463
4464 return 0;
4465 }
4466
4467 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4468 struct hns_roce_qp *hr_qp,
4469 struct hns_roce_v2_qp_context *context,
4470 struct hns_roce_v2_qp_context *qpc_mask)
4471 {
4472 struct ib_device *ibdev = &hr_dev->ib_dev;
4473 u64 sge_cur_blk = 0;
4474 u64 sq_cur_blk = 0;
4475 int count;
4476
4477
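/* Look up the first block of the SQ buffer and, if extended SGEs are
 * used, of the SGE area.
 */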
4478 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4479 if (count < 1) {
4480 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4481 hr_qp->qpn);
4482 return -EINVAL;
4483 }
4484 if (hr_qp->sge.sge_cnt > 0) {
4485 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4486 hr_qp->sge.offset,
4487 &sge_cur_blk, 1, NULL);
4488 if (count < 1) {
4489 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4490 hr_qp->qpn);
4491 return -EINVAL;
4492 }
4493 }
4494
4501 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
4502 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4503 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
4504 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4505 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
4506 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
4507
4508 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
4509 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4510 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
4511 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4512 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
4513 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
4514
4515 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
4516 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4517 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
4518 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4519 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
4520 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
4521
4522 return 0;
4523 }
4524
4525 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4526 const struct ib_qp_attr *attr)
4527 {
4528 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4529 return IB_MTU_4096;
4530
4531 return attr->path_mtu;
4532 }
4533
4534 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4535 const struct ib_qp_attr *attr, int attr_mask,
4536 struct hns_roce_v2_qp_context *context,
4537 struct hns_roce_v2_qp_context *qpc_mask)
4538 {
4539 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4540 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4541 struct ib_device *ibdev = &hr_dev->ib_dev;
4542 dma_addr_t trrl_ba;
4543 dma_addr_t irrl_ba;
4544 enum ib_mtu ib_mtu;
4545 const u8 *smac;
4546 u8 lp_pktn_ini;
4547 u64 *mtts;
4548 u8 *dmac;
4549 u32 port;
4550 int mtu;
4551 int ret;
4552
4553 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4554 if (ret) {
4555 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4556 return ret;
4557 }
4558
4559
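/* Look up the base addresses of this QP's IRRL and TRRL entries. */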
4560 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4561 hr_qp->qpn, &irrl_ba);
4562 if (!mtts) {
4563 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4564 return -EINVAL;
4565 }
4566
4567
4568 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4569 hr_qp->qpn, &trrl_ba);
4570 if (!mtts) {
4571 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4572 return -EINVAL;
4573 }
4574
4575 if (attr_mask & IB_QP_ALT_PATH) {
4576 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4577 attr_mask);
4578 return -EINVAL;
4579 }
4580
4581 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
4582 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
4583 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4584 qpc_mask->trrl_ba = 0;
4585 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
4586 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
4587
4588 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4589 qpc_mask->irrl_ba = 0;
4590 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
4591 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
4592
4593 hr_reg_enable(context, QPC_RMT_E2E);
4594 hr_reg_clear(qpc_mask, QPC_RMT_E2E);
4595
4596 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
4597 hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
4598
4599 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4600
4601 smac = (const u8 *)hr_dev->dev_addr[port];
4602 dmac = (u8 *)attr->ah_attr.roce.dmac;
4603
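/* Configure the loopback indication when the destination MAC matches
 * the local port's MAC or the device is in loopback mode.
 */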
4604 if (ether_addr_equal_unaligned(dmac, smac) ||
4605 hr_dev->loop_idc == 0x1) {
4606 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
4607 hr_reg_clear(qpc_mask, QPC_LBI);
4608 }
4609
4610 if (attr_mask & IB_QP_DEST_QPN) {
4611 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
4612 hr_reg_clear(qpc_mask, QPC_DQPN);
4613 }
4614
4615 memcpy(&(context->dmac), dmac, sizeof(u32));
4616 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
4617 qpc_mask->dmac = 0;
4618 hr_reg_clear(qpc_mask, QPC_DMAC_H);
4619
4620 ib_mtu = get_mtu(ibqp, attr);
4621 hr_qp->path_mtu = ib_mtu;
4622
4623 mtu = ib_mtu_enum_to_int(ib_mtu);
4624 if (WARN_ON(mtu <= 0))
4625 return -EINVAL;
4626 #define MAX_LP_MSG_LEN 16384
4627
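/* Choose lp_pktn_ini so that mtu << lp_pktn_ini stays within
 * MAX_LP_MSG_LEN.
 */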
4628 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
4629 if (WARN_ON(lp_pktn_ini >= 0xF))
4630 return -EINVAL;
4631
4632 if (attr_mask & IB_QP_PATH_MTU) {
4633 hr_reg_write(context, QPC_MTU, ib_mtu);
4634 hr_reg_clear(qpc_mask, QPC_MTU);
4635 }
4636
4637 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
4638 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
4639
4640
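/* Keep ACK_REQ_FREQ in step with LP_PKTN_INI. */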
4641 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
4642 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
4643
4644 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
4645 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
4646 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
4647
4648 context->rq_rnr_timer = 0;
4649 qpc_mask->rq_rnr_timer = 0;
4650
4651 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
4652 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
4653
4654
4655 hr_reg_write(context, QPC_LP_SGEN_INI, 3);
4656 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
4657
4658 return 0;
4659 }
4660
4661 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4662 const struct ib_qp_attr *attr, int attr_mask,
4663 struct hns_roce_v2_qp_context *context,
4664 struct hns_roce_v2_qp_context *qpc_mask)
4665 {
4666 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4667 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4668 struct ib_device *ibdev = &hr_dev->ib_dev;
4669 int ret;
4670
4671
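/* Alternate path and path migration are not supported. */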
4672 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4673 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error.\n", attr_mask);
4674 return -EINVAL;
4675 }
4676
4677 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4678 if (ret) {
4679 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4680 return ret;
4681 }
4682
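/* The context value of the fields below stays zero; clearing only
 * their mask bits lets the hardware reset them for the new state.
 */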
4688 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
4689
4690 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
4691
4692 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
4693 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
4694 hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
4695
4696 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
4697
4698 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
4699
4700 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
4701
4702 hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
4703
4704 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
4705
4706 return 0;
4707 }
4708
4709 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4710 u32 *dip_idx)
4711 {
4712 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4713 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4714 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
4715 u32 *head = &hr_dev->qp_table.idx_table.head;
4716 u32 *tail = &hr_dev->qp_table.idx_table.tail;
4717 struct hns_roce_dip *hr_dip;
4718 unsigned long flags;
4719 int ret = 0;
4720
4721 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
4722
4723 spare_idx[*tail] = ibqp->qp_num;
4724 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
4725
4726 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4727 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
4728 *dip_idx = hr_dip->dip_idx;
4729 goto out;
4730 }
4731 }
4732
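/* No entry matches this DGID: take a spare index and add a new DIP
 * node (GFP_ATOMIC, the dip_list lock is held).
 */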
4736 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4737 if (!hr_dip) {
4738 ret = -ENOMEM;
4739 goto out;
4740 }
4741
4742 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4743 hr_dip->dip_idx = *dip_idx = spare_idx[*head];
4744 *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
4745 list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4746
4747 out:
4748 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4749 return ret;
4750 }
4751
4752 enum {
4753 CONG_DCQCN,
4754 CONG_WINDOW,
4755 };
4756
4757 enum {
4758 UNSUPPORT_CONG_LEVEL,
4759 SUPPORT_CONG_LEVEL,
4760 };
4761
4762 enum {
4763 CONG_LDCP,
4764 CONG_HC3,
4765 };
4766
4767 enum {
4768 DIP_INVALID,
4769 DIP_VALID,
4770 };
4771
4772 enum {
4773 WND_LIMIT,
4774 WND_UNLIMIT,
4775 };
4776
4777 static int check_cong_type(struct ib_qp *ibqp,
4778 struct hns_roce_congestion_algorithm *cong_alg)
4779 {
4780 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4781
4782
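/* Map the device's congestion control type onto the per-QP algorithm
 * selection fields.
 */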
4783 switch (hr_dev->caps.cong_type) {
4784 case CONG_TYPE_DCQCN:
4785 cong_alg->alg_sel = CONG_DCQCN;
4786 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4787 cong_alg->dip_vld = DIP_INVALID;
4788 cong_alg->wnd_mode_sel = WND_LIMIT;
4789 break;
4790 case CONG_TYPE_LDCP:
4791 cong_alg->alg_sel = CONG_WINDOW;
4792 cong_alg->alg_sub_sel = CONG_LDCP;
4793 cong_alg->dip_vld = DIP_INVALID;
4794 cong_alg->wnd_mode_sel = WND_UNLIMIT;
4795 break;
4796 case CONG_TYPE_HC3:
4797 cong_alg->alg_sel = CONG_WINDOW;
4798 cong_alg->alg_sub_sel = CONG_HC3;
4799 cong_alg->dip_vld = DIP_INVALID;
4800 cong_alg->wnd_mode_sel = WND_LIMIT;
4801 break;
4802 case CONG_TYPE_DIP:
4803 cong_alg->alg_sel = CONG_DCQCN;
4804 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4805 cong_alg->dip_vld = DIP_VALID;
4806 cong_alg->wnd_mode_sel = WND_LIMIT;
4807 break;
4808 default:
4809 ibdev_err(&hr_dev->ib_dev,
4810 "error type(%u) for congestion selection.\n",
4811 hr_dev->caps.cong_type);
4812 return -EINVAL;
4813 }
4814
4815 return 0;
4816 }
4817
4818 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4819 struct hns_roce_v2_qp_context *context,
4820 struct hns_roce_v2_qp_context *qpc_mask)
4821 {
4822 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4823 struct hns_roce_congestion_algorithm cong_field;
4824 struct ib_device *ibdev = ibqp->device;
4825 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
4826 u32 dip_idx = 0;
4827 int ret;
4828
4829 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4830 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4831 return 0;
4832
4833 ret = check_cong_type(ibqp, &cong_field);
4834 if (ret)
4835 return ret;
4836
4837 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4838 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
4839 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
4840 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4841 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
4842 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4843 cong_field.alg_sub_sel);
4844 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
4845 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4846 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
4847 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
4848 cong_field.wnd_mode_sel);
4849 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
4850
4851
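/* Only the DIP algorithm needs a DIP context index. */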
4852 if (cong_field.dip_vld == 0)
4853 return 0;
4854
4855 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4856 if (ret) {
4857 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4858 return ret;
4859 }
4860
4861 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4862 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4863
4864 return 0;
4865 }
4866
4867 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4868 const struct ib_qp_attr *attr,
4869 int attr_mask,
4870 struct hns_roce_v2_qp_context *context,
4871 struct hns_roce_v2_qp_context *qpc_mask)
4872 {
4873 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4874 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4875 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4876 struct ib_device *ibdev = &hr_dev->ib_dev;
4877 const struct ib_gid_attr *gid_attr = NULL;
4878 int is_roce_protocol;
4879 u16 vlan_id = 0xffff;
4880 bool is_udp = false;
4881 u8 ib_port;
4882 u8 hr_port;
4883 int ret;
4884
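/* QPs used by the driver's internal free-mr path only take the service
 * level from the AH attribute; skip the rest of the path setup.
 */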
4890 if (hr_qp->free_mr_en) {
4891 hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
4892 hr_reg_clear(qpc_mask, QPC_SL);
4893 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4894 return 0;
4895 }
4896
4897 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4898 hr_port = ib_port - 1;
4899 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4900 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4901
4902 if (is_roce_protocol) {
4903 gid_attr = attr->ah_attr.grh.sgid_attr;
4904 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4905 if (ret)
4906 return ret;
4907
4908 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
4909 }
4910
4911
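/* Only HIP08 needs RQ/SQ VLAN tagging enabled here when the GID
 * carries a VLAN ID.
 */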
4912 if (vlan_id < VLAN_N_VID &&
4913 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4914 hr_reg_enable(context, QPC_RQ_VLAN_EN);
4915 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
4916 hr_reg_enable(context, QPC_SQ_VLAN_EN);
4917 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
4918 }
4919
4920 hr_reg_write(context, QPC_VLAN_ID, vlan_id);
4921 hr_reg_clear(qpc_mask, QPC_VLAN_ID);
4922
4923 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4924 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4925 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4926 return -EINVAL;
4927 }
4928
4929 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4930 ibdev_err(ibdev, "ah attr is not RDMA RoCE type.\n");
4931 return -EINVAL;
4932 }
4933
4934 hr_reg_write(context, QPC_UDPSPN,
4935 is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
4936 attr->dest_qp_num) :
4937 0);
4938
4939 hr_reg_clear(qpc_mask, QPC_UDPSPN);
4940
4941 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
4942
4943 hr_reg_clear(qpc_mask, QPC_GMV_IDX);
4944
4945 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
4946 hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
4947
4948 ret = fill_cong_field(ibqp, attr, context, qpc_mask);
4949 if (ret)
4950 return ret;
4951
4952 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
4953 hr_reg_clear(qpc_mask, QPC_TC);
4954
4955 hr_reg_write(context, QPC_FL, grh->flow_label);
4956 hr_reg_clear(qpc_mask, QPC_FL);
4957 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4958 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4959
4960 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4961 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4962 ibdev_err(ibdev,
4963 "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
4964 hr_qp->sl, MAX_SERVICE_LEVEL);
4965 return -EINVAL;
4966 }
4967
4968 hr_reg_write(context, QPC_SL, hr_qp->sl);
4969 hr_reg_clear(qpc_mask, QPC_SL);
4970
4971 return 0;
4972 }
4973
4974 static bool check_qp_state(enum ib_qp_state cur_state,
4975 enum ib_qp_state new_state)
4976 {
4977 static const bool sm[][IB_QPS_ERR + 1] = {
4978 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4979 [IB_QPS_INIT] = true },
4980 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4981 [IB_QPS_INIT] = true,
4982 [IB_QPS_RTR] = true,
4983 [IB_QPS_ERR] = true },
4984 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4985 [IB_QPS_RTS] = true,
4986 [IB_QPS_ERR] = true },
4987 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4988 [IB_QPS_RTS] = true,
4989 [IB_QPS_ERR] = true },
4990 [IB_QPS_SQD] = {},
4991 [IB_QPS_SQE] = {},
4992 [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
4993 [IB_QPS_ERR] = true }
4994 };
4995
4996 return sm[cur_state][new_state];
4997 }
4998
4999 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
5000 const struct ib_qp_attr *attr,
5001 int attr_mask,
5002 enum ib_qp_state cur_state,
5003 enum ib_qp_state new_state,
5004 struct hns_roce_v2_qp_context *context,
5005 struct hns_roce_v2_qp_context *qpc_mask)
5006 {
5007 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5008 int ret = 0;
5009
5010 if (!check_qp_state(cur_state, new_state)) {
5011 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
5012 return -EINVAL;
5013 }
5014
5015 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
5016 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
5017 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
5018 qpc_mask);
5019 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
5020 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
5021 qpc_mask);
5022 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
5023 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
5024 qpc_mask);
5025 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
5026 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
5027 qpc_mask);
5028 }
5029
5030 return ret;
5031 }
5032
5033 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
5034 {
5035 #define QP_ACK_TIMEOUT_MAX_HIP08 20
5036 #define QP_ACK_TIMEOUT_OFFSET 10
5037 #define QP_ACK_TIMEOUT_MAX 31
5038
5039 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5040 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
5041 ibdev_warn(&hr_dev->ib_dev,
5042 "Local ACK timeout shall be 0 to 20.\n");
5043 return false;
5044 }
5045 *timeout += QP_ACK_TIMEOUT_OFFSET;
5046 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
5047 if (*timeout > QP_ACK_TIMEOUT_MAX) {
5048 ibdev_warn(&hr_dev->ib_dev,
5049 "Local ACK timeout shall be 0 to 31.\n");
5050 return false;
5051 }
5052 }
5053
5054 return true;
5055 }
5056
5057 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
5058 const struct ib_qp_attr *attr,
5059 int attr_mask,
5060 struct hns_roce_v2_qp_context *context,
5061 struct hns_roce_v2_qp_context *qpc_mask)
5062 {
5063 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5064 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5065 int ret = 0;
5066 u8 timeout;
5067
5068 if (attr_mask & IB_QP_AV) {
5069 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
5070 qpc_mask);
5071 if (ret)
5072 return ret;
5073 }
5074
5075 if (attr_mask & IB_QP_TIMEOUT) {
5076 timeout = attr->timeout;
5077 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
5078 hr_reg_write(context, QPC_AT, timeout);
5079 hr_reg_clear(qpc_mask, QPC_AT);
5080 }
5081 }
5082
5083 if (attr_mask & IB_QP_RETRY_CNT) {
5084 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
5085 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
5086
5087 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
5088 hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
5089 }
5090
5091 if (attr_mask & IB_QP_RNR_RETRY) {
5092 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
5093 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
5094
5095 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
5096 hr_reg_clear(qpc_mask, QPC_RNR_CNT);
5097 }
5098
5099 if (attr_mask & IB_QP_SQ_PSN) {
5100 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
5101 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
5102
5103 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
5104 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
5105
5106 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
5107 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
5108
5109 hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
5110 attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
5111 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
5112
5113 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
5114 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
5115
5116 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
5117 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
5118 }
5119
5120 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
5121 attr->max_dest_rd_atomic) {
5122 hr_reg_write(context, QPC_RR_MAX,
5123 fls(attr->max_dest_rd_atomic - 1));
5124 hr_reg_clear(qpc_mask, QPC_RR_MAX);
5125 }
5126
5127 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
5128 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
5129 hr_reg_clear(qpc_mask, QPC_SR_MAX);
5130 }
5131
5132 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
5133 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
5134
5135 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
5136 hr_reg_write(context, QPC_MIN_RNR_TIME,
5137 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
5138 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
5139 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
5140 }
5141
5142 if (attr_mask & IB_QP_RQ_PSN) {
5143 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
5144 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
5145
5146 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
5147 hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
5148 }
5149
5150 if (attr_mask & IB_QP_QKEY) {
5151 context->qkey_xrcd = cpu_to_le32(attr->qkey);
5152 qpc_mask->qkey_xrcd = 0;
5153 hr_qp->qkey = attr->qkey;
5154 }
5155
5156 return ret;
5157 }
5158
5159 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
5160 const struct ib_qp_attr *attr,
5161 int attr_mask)
5162 {
5163 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5164 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5165
5166 if (attr_mask & IB_QP_ACCESS_FLAGS)
5167 hr_qp->atomic_rd_en = attr->qp_access_flags;
5168
5169 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
5170 hr_qp->resp_depth = attr->max_dest_rd_atomic;
5171 if (attr_mask & IB_QP_PORT) {
5172 hr_qp->port = attr->port_num - 1;
5173 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
5174 }
5175 }
5176
5177 static void clear_qp(struct hns_roce_qp *hr_qp)
5178 {
5179 struct ib_qp *ibqp = &hr_qp->ibqp;
5180
5181 if (ibqp->send_cq)
5182 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
5183 hr_qp->qpn, NULL);
5184
5185 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
5186 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
5187 hr_qp->qpn, ibqp->srq ?
5188 to_hr_srq(ibqp->srq) : NULL);
5189
5190 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
5191 *hr_qp->rdb.db_record = 0;
5192
5193 hr_qp->rq.head = 0;
5194 hr_qp->rq.tail = 0;
5195 hr_qp->sq.head = 0;
5196 hr_qp->sq.tail = 0;
5197 hr_qp->next_sge = 0;
5198 }
5199
5200 static void v2_set_flushed_fields(struct ib_qp *ibqp,
5201 struct hns_roce_v2_qp_context *context,
5202 struct hns_roce_v2_qp_context *qpc_mask)
5203 {
5204 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5205 unsigned long sq_flag = 0;
5206 unsigned long rq_flag = 0;
5207
5208 if (ibqp->qp_type == IB_QPT_XRC_TGT)
5209 return;
5210
5211 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
5212 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
5213 hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
5214 hr_qp->state = IB_QPS_ERR;
5215 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
5216
5217 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI)
5218 return;
5219
5220 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
5221 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
5222 hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
5223 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
5224 }
5225
5226 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
5227 const struct ib_qp_attr *attr,
5228 int attr_mask, enum ib_qp_state cur_state,
5229 enum ib_qp_state new_state)
5230 {
5231 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5232 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5233 struct hns_roce_v2_qp_context ctx[2];
5234 struct hns_roce_v2_qp_context *context = ctx;
5235 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
5236 struct ib_device *ibdev = &hr_dev->ib_dev;
5237 int ret;
5238
5239 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
5240 return -EOPNOTSUPP;
5241
5242
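/* The modify command takes a context followed by a mask of the same
 * size: a mask bit left set keeps the current value of that field, so
 * start from an all-ones mask and clear bits only for fields that are
 * actually changed.
 */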
5248 memset(context, 0, hr_dev->caps.qpc_sz);
5249 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5250
5251 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5252 new_state, context, qpc_mask);
5253 if (ret)
5254 goto out;
5255
5256
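/* Moving to the error state records the SQ/RQ producer indices in the
 * QPC so that outstanding WQEs can be flushed.
 */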
5257 if (new_state == IB_QPS_ERR)
5258 v2_set_flushed_fields(ibqp, context, qpc_mask);
5259
5260
5261 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5262 qpc_mask);
5263 if (ret)
5264 goto out;
5265
5266 hr_reg_write_bool(context, QPC_INV_CREDIT,
5267 to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
5268 ibqp->srq);
5269 hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
5270
5271
5272 hr_reg_write(context, QPC_QP_ST, new_state);
5273 hr_reg_clear(qpc_mask, QPC_QP_ST);
5274
5275
5276 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5277 if (ret) {
5278 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5279 goto out;
5280 }
5281
5282 hr_qp->state = new_state;
5283
5284 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5285
5286 if (new_state == IB_QPS_RESET && !ibqp->uobject)
5287 clear_qp(hr_qp);
5288
5289 out:
5290 return ret;
5291 }
5292
5293 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5294 {
5295 static const enum ib_qp_state map[] = {
5296 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5297 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5298 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5299 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5300 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5301 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5302 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5303 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5304 };
5305
5306 return (state < ARRAY_SIZE(map)) ? map[state] : -1;
5307 }
5308
5309 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
5310 struct hns_roce_qp *hr_qp,
5311 struct hns_roce_v2_qp_context *hr_context)
5312 {
5313 struct hns_roce_cmd_mailbox *mailbox;
5314 int ret;
5315
5316 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5317 if (IS_ERR(mailbox))
5318 return PTR_ERR(mailbox);
5319
5320 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
5321 hr_qp->qpn);
5322 if (ret)
5323 goto out;
5324
5325 memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
5326
5327 out:
5328 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5329 return ret;
5330 }
5331
5332 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5333 int qp_attr_mask,
5334 struct ib_qp_init_attr *qp_init_attr)
5335 {
5336 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5337 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5338 struct hns_roce_v2_qp_context context = {};
5339 struct ib_device *ibdev = &hr_dev->ib_dev;
5340 int tmp_qp_state;
5341 int state;
5342 int ret;
5343
5344 memset(qp_attr, 0, sizeof(*qp_attr));
5345 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5346
5347 mutex_lock(&hr_qp->mutex);
5348
5349 if (hr_qp->state == IB_QPS_RESET) {
5350 qp_attr->qp_state = IB_QPS_RESET;
5351 ret = 0;
5352 goto done;
5353 }
5354
5355 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
5356 if (ret) {
5357 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
5358 ret = -EINVAL;
5359 goto out;
5360 }
5361
5362 state = hr_reg_read(&context, QPC_QP_ST);
5363 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5364 if (tmp_qp_state == -1) {
5365 ibdev_err(ibdev, "Illegal ib_qp_state\n");
5366 ret = -EINVAL;
5367 goto out;
5368 }
5369 hr_qp->state = (u8)tmp_qp_state;
5370 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5371 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
5372 qp_attr->path_mig_state = IB_MIG_ARMED;
5373 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5374 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5375 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5376
5377 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
5378 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
5379 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
5380 qp_attr->qp_access_flags =
5381 ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
5382 ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
5383 ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
5384
5385 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5386 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5387 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
5388 struct ib_global_route *grh =
5389 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5390
5391 rdma_ah_set_sl(&qp_attr->ah_attr,
5392 hr_reg_read(&context, QPC_SL));
5393 grh->flow_label = hr_reg_read(&context, QPC_FL);
5394 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
5395 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
5396 grh->traffic_class = hr_reg_read(&context, QPC_TC);
5397
5398 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5399 }
5400
5401 qp_attr->port_num = hr_qp->port + 1;
5402 qp_attr->sq_draining = 0;
5403 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
5404 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
5405
5406 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
5407 qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
5408 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
5409 qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
5410
5411 done:
5412 qp_attr->cur_qp_state = qp_attr->qp_state;
5413 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5414 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
5415 qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
5416
5417 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5418 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5419
5420 qp_init_attr->qp_context = ibqp->qp_context;
5421 qp_init_attr->qp_type = ibqp->qp_type;
5422 qp_init_attr->recv_cq = ibqp->recv_cq;
5423 qp_init_attr->send_cq = ibqp->send_cq;
5424 qp_init_attr->srq = ibqp->srq;
5425 qp_init_attr->cap = qp_attr->cap;
5426 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5427
5428 out:
5429 mutex_unlock(&hr_qp->mutex);
5430 return ret;
5431 }
5432
5433 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
5434 {
5435 return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5436 hr_qp->ibqp.qp_type == IB_QPT_UD ||
5437 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5438 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
5439 hr_qp->state != IB_QPS_RESET);
5440 }
5441
5442 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5443 struct hns_roce_qp *hr_qp,
5444 struct ib_udata *udata)
5445 {
5446 struct ib_device *ibdev = &hr_dev->ib_dev;
5447 struct hns_roce_cq *send_cq, *recv_cq;
5448 unsigned long flags;
5449 int ret = 0;
5450
5451 if (modify_qp_is_ok(hr_qp)) {
5452
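/* Bring the QP back to RESET before releasing its resources. */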
5453 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5454 hr_qp->state, IB_QPS_RESET);
5455 if (ret)
5456 ibdev_err(ibdev,
5457 "failed to modify QP to RST, ret = %d.\n",
5458 ret);
5459 }
5460
5461 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5462 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5463
5464 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5465 hns_roce_lock_cqs(send_cq, recv_cq);
5466
5467 if (!udata) {
5468 if (recv_cq)
5469 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5470 (hr_qp->ibqp.srq ?
5471 to_hr_srq(hr_qp->ibqp.srq) :
5472 NULL));
5473
5474 if (send_cq && send_cq != recv_cq)
5475 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5476 }
5477
5478 hns_roce_qp_remove(hr_dev, hr_qp);
5479
5480 hns_roce_unlock_cqs(send_cq, recv_cq);
5481 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5482
5483 return ret;
5484 }
5485
5486 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5487 {
5488 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5489 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5490 int ret;
5491
5492 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5493 if (ret)
5494 ibdev_err(&hr_dev->ib_dev,
5495 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5496 hr_qp->qpn, ret);
5497
5498 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5499
5500 return 0;
5501 }
5502
5503 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5504 struct hns_roce_qp *hr_qp)
5505 {
5506 struct ib_device *ibdev = &hr_dev->ib_dev;
5507 struct hns_roce_sccc_clr_done *resp;
5508 struct hns_roce_sccc_clr *clr;
5509 struct hns_roce_cmq_desc desc;
5510 int ret, i;
5511
5512 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5513 return 0;
5514
5515 mutex_lock(&hr_dev->qp_table.scc_mutex);
5516
5517
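/* Reset the SCC context. */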
5518 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5519 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5520 if (ret) {
5521 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5522 goto out;
5523 }
5524
5525
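/* Clear the SCC context of this QP. */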
5526 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5527 clr = (struct hns_roce_sccc_clr *)desc.data;
5528 clr->qpn = cpu_to_le32(hr_qp->qpn);
5529 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5530 if (ret) {
5531 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5532 goto out;
5533 }
5534
5535
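/* Poll until the hardware reports that the clear has completed. */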
5536 resp = (struct hns_roce_sccc_clr_done *)desc.data;
5537 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5538 hns_roce_cmq_setup_basic_desc(&desc,
5539 HNS_ROCE_OPC_QUERY_SCCC, true);
5540 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5541 if (ret) {
5542 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5543 ret);
5544 goto out;
5545 }
5546
5547 if (resp->clr_done)
5548 goto out;
5549
5550 msleep(20);
5551 }
5552
5553 ibdev_err(ibdev, "Query SCC clr done flag timed out.\n");
5554 ret = -ETIMEDOUT;
5555
5556 out:
5557 mutex_unlock(&hr_dev->qp_table.scc_mutex);
5558 return ret;
5559 }
5560
5561 #define DMA_IDX_SHIFT 3
5562 #define DMA_WQE_SHIFT 3
5563
5564 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
5565 struct hns_roce_srq_context *ctx)
5566 {
5567 struct hns_roce_idx_que *idx_que = &srq->idx_que;
5568 struct ib_device *ibdev = srq->ibsrq.device;
5569 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5570 u64 mtts_idx[MTT_MIN_COUNT] = {};
5571 dma_addr_t dma_handle_idx = 0;
5572 int ret;
5573
5574
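/* Look up the physical layout of the SRQ index queue buffer. */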
5575 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
5576 ARRAY_SIZE(mtts_idx), &dma_handle_idx);
5577 if (ret < 1) {
5578 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
5579 ret);
5580 return -ENOBUFS;
5581 }
5582
5583 hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
5584 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
5585
5586 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
5587 hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
5588 upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
5589
5590 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
5591 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
5592 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
5593 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
5594
5595 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
5596 to_hr_hw_page_addr(mtts_idx[0]));
5597 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
5598 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5599
5600 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
5601 to_hr_hw_page_addr(mtts_idx[1]));
5602 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
5603 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5604
5605 return 0;
5606 }
5607
5608 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
5609 {
5610 struct ib_device *ibdev = srq->ibsrq.device;
5611 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5612 struct hns_roce_srq_context *ctx = mb_buf;
5613 u64 mtts_wqe[MTT_MIN_COUNT] = {};
5614 dma_addr_t dma_handle_wqe = 0;
5615 int ret;
5616
5617 memset(ctx, 0, sizeof(*ctx));
5618
5619
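/* Look up the physical layout of the SRQ WQE buffer. */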
5620 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
5621 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
5622 if (ret < 1) {
5623 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
5624 ret);
5625 return -ENOBUFS;
5626 }
5627
5628 hr_reg_write(ctx, SRQC_SRQ_ST, 1);
5629 hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
5630 srq->ibsrq.srq_type == IB_SRQT_XRC);
5631 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
5632 hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
5633 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
5634 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
5635 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
5636 hr_reg_write(ctx, SRQC_RQWS,
5637 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
5638
5639 hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
5640 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5641 srq->wqe_cnt));
5642
5643 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
5644 hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
5645 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
5646
5647 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
5648 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5649 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
5650 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5651
5652 return hns_roce_v2_write_srqc_index_queue(srq, ctx);
5653 }
5654
5655 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5656 struct ib_srq_attr *srq_attr,
5657 enum ib_srq_attr_mask srq_attr_mask,
5658 struct ib_udata *udata)
5659 {
5660 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5661 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5662 struct hns_roce_srq_context *srq_context;
5663 struct hns_roce_srq_context *srqc_mask;
5664 struct hns_roce_cmd_mailbox *mailbox;
5665 int ret;
5666
5667
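/* Resizing the SRQ is not supported. */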
5668 if (srq_attr_mask & IB_SRQ_MAX_WR)
5669 return -EINVAL;
5670
5671 if (srq_attr_mask & IB_SRQ_LIMIT) {
5672 if (srq_attr->srq_limit > srq->wqe_cnt)
5673 return -EINVAL;
5674
5675 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5676 if (IS_ERR(mailbox))
5677 return PTR_ERR(mailbox);
5678
5679 srq_context = mailbox->buf;
5680 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5681
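		/* The mailbox buffer holds the new context followed by a mask;
		 * a field is updated only where its mask bits are cleared below.
		 */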
5682 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5683
5684 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
5685 hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
5686
5687 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
5688 HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
5689 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5690 if (ret) {
5691 ibdev_err(&hr_dev->ib_dev,
5692 "failed to handle cmd of modifying SRQ, ret = %d.\n",
5693 ret);
5694 return ret;
5695 }
5696 }
5697
5698 return 0;
5699 }
5700
5701 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5702 {
5703 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5704 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5705 struct hns_roce_srq_context *srq_context;
5706 struct hns_roce_cmd_mailbox *mailbox;
5707 int ret;
5708
5709 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5710 if (IS_ERR(mailbox))
5711 return PTR_ERR(mailbox);
5712
5713 srq_context = mailbox->buf;
5714 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
5715 HNS_ROCE_CMD_QUERY_SRQC, srq->srqn);
5716 if (ret) {
5717 ibdev_err(&hr_dev->ib_dev,
5718 "failed to process cmd of querying SRQ, ret = %d.\n",
5719 ret);
5720 goto out;
5721 }
5722
5723 attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
5724 attr->max_wr = srq->wqe_cnt;
5725 attr->max_sge = srq->max_gs - srq->rsv_sge;
5726
5727 out:
5728 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5729 return ret;
5730 }
5731
5732 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5733 {
5734 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5735 struct hns_roce_v2_cq_context *cq_context;
5736 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5737 struct hns_roce_v2_cq_context *cqc_mask;
5738 struct hns_roce_cmd_mailbox *mailbox;
5739 int ret;
5740
5741 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5742 if (IS_ERR(mailbox))
5743 return PTR_ERR(mailbox);
5744
5745 cq_context = mailbox->buf;
5746 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5747
5748 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5749
5750 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
5751 hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
5752
5753 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5754 if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
5755 dev_info(hr_dev->dev,
5756 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
5757 cq_period);
5758 cq_period = HNS_ROCE_MAX_CQ_PERIOD;
5759 }
5760 cq_period *= HNS_ROCE_CLOCK_ADJUST;
5761 }
5762 hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
5763 hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
5764
5765 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
5766 HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn);
5767 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5768 if (ret)
5769 ibdev_err(&hr_dev->ib_dev,
5770 "failed to process cmd when modifying CQ, ret = %d.\n",
5771 ret);
5772
5773 return ret;
5774 }
5775
5776 static void hns_roce_irq_work_handle(struct work_struct *work)
5777 {
5778 struct hns_roce_work *irq_work =
5779 container_of(work, struct hns_roce_work, work);
5780 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5781
5782 switch (irq_work->event_type) {
5783 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5784 		ibdev_info(ibdev, "Path migration succeeded.\n");
5785 break;
5786 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5787 ibdev_warn(ibdev, "Path migration failed.\n");
5788 break;
5789 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5790 break;
5791 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5792 ibdev_warn(ibdev, "Send queue drained.\n");
5793 break;
5794 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5795 ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
5796 irq_work->queue_num, irq_work->sub_type);
5797 break;
5798 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5799 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5800 irq_work->queue_num);
5801 break;
5802 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5803 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5804 irq_work->queue_num, irq_work->sub_type);
5805 break;
5806 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5807 		ibdev_warn(ibdev, "SRQ limit reached.\n");
5808 break;
5809 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5810 		ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5811 break;
5812 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5813 ibdev_err(ibdev, "SRQ catas error.\n");
5814 break;
5815 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5816 ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
5817 break;
5818 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5819 ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
5820 break;
5821 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5822 ibdev_warn(ibdev, "DB overflow.\n");
5823 break;
5824 case HNS_ROCE_EVENT_TYPE_FLR:
5825 ibdev_warn(ibdev, "Function level reset.\n");
5826 break;
5827 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5828 ibdev_err(ibdev, "xrc domain violation error.\n");
5829 break;
5830 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5831 ibdev_err(ibdev, "invalid xrceth error.\n");
5832 break;
5833 default:
5834 break;
5835 }
5836
5837 kfree(irq_work);
5838 }
5839
5840 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5841 struct hns_roce_eq *eq, u32 queue_num)
5842 {
5843 struct hns_roce_work *irq_work;
5844
5845 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5846 if (!irq_work)
5847 return;
5848
5849 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5850 irq_work->hr_dev = hr_dev;
5851 irq_work->event_type = eq->event_type;
5852 irq_work->sub_type = eq->sub_type;
5853 irq_work->queue_num = queue_num;
5854 queue_work(hr_dev->irq_workq, &(irq_work->work));
5855 }
5856
5857 static void update_eq_db(struct hns_roce_eq *eq)
5858 {
5859 struct hns_roce_dev *hr_dev = eq->hr_dev;
5860 struct hns_roce_v2_db eq_db = {};
5861
5862 if (eq->type_flag == HNS_ROCE_AEQ) {
5863 hr_reg_write(&eq_db, EQ_DB_CMD,
5864 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5865 HNS_ROCE_EQ_DB_CMD_AEQ :
5866 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5867 } else {
5868 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
5869
5870 hr_reg_write(&eq_db, EQ_DB_CMD,
5871 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5872 HNS_ROCE_EQ_DB_CMD_CEQ :
5873 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5874 }
5875
5876 hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
5877
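	/* Publish the consumer index and the arm state in one doorbell write */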
5878 hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
5879 }
5880
5881 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5882 {
5883 struct hns_roce_aeqe *aeqe;
5884
5885 aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5886 (eq->cons_index & (eq->entries - 1)) *
5887 eq->eqe_size);
5888
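	/* The AEQE is valid only while its owner bit differs from the wrap
	 * bit of cons_index (eq->entries is a power of two, so this bit
	 * toggles on each pass through the queue).
	 */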
5889 return (hr_reg_read(aeqe, AEQE_OWNER) ^
5890 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5891 }
5892
5893 static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5894 struct hns_roce_eq *eq)
5895 {
5896 struct device *dev = hr_dev->dev;
5897 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5898 irqreturn_t aeqe_found = IRQ_NONE;
5899 int event_type;
5900 u32 queue_num;
5901 int sub_type;
5902
5903 while (aeqe) {
5904 		/* Make sure we read the AEQ entry only after we have checked
5905 		 * its ownership.
5906 		 */
5907 dma_rmb();
5908
5909 event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE);
5910 sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE);
5911 queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM);
5912
5913 switch (event_type) {
5914 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5915 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5916 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5917 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5918 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5919 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5920 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5921 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5922 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5923 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5924 hns_roce_qp_event(hr_dev, queue_num, event_type);
5925 break;
5926 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5927 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5928 hns_roce_srq_event(hr_dev, queue_num, event_type);
5929 break;
5930 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5931 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5932 hns_roce_cq_event(hr_dev, queue_num, event_type);
5933 break;
5934 case HNS_ROCE_EVENT_TYPE_MB:
5935 hns_roce_cmd_event(hr_dev,
5936 le16_to_cpu(aeqe->event.cmd.token),
5937 aeqe->event.cmd.status,
5938 le64_to_cpu(aeqe->event.cmd.out_param));
5939 break;
5940 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5941 case HNS_ROCE_EVENT_TYPE_FLR:
5942 break;
5943 default:
5944 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5945 event_type, eq->eqn, eq->cons_index);
5946 break;
5947 }
5948
5949 eq->event_type = event_type;
5950 eq->sub_type = sub_type;
5951 ++eq->cons_index;
5952 aeqe_found = IRQ_HANDLED;
5953
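		/* Defer the verbose per-event reporting to the irq workqueue */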
5954 hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
5955
5956 aeqe = next_aeqe_sw_v2(eq);
5957 }
5958
5959 update_eq_db(eq);
5960
5961 return IRQ_RETVAL(aeqe_found);
5962 }
5963
5964 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5965 {
5966 struct hns_roce_ceqe *ceqe;
5967
5968 ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5969 (eq->cons_index & (eq->entries - 1)) *
5970 eq->eqe_size);
5971
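	/* Same owner-bit convention as next_aeqe_sw_v2() above */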
5972 return (hr_reg_read(ceqe, CEQE_OWNER) ^
5973 !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5974 }
5975
5976 static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5977 struct hns_roce_eq *eq)
5978 {
5979 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5980 irqreturn_t ceqe_found = IRQ_NONE;
5981 u32 cqn;
5982
5983 while (ceqe) {
5984 		/* Make sure we read the CEQ entry only after we have checked
5985 		 * its ownership.
5986 		 */
5987 dma_rmb();
5988
5989 cqn = hr_reg_read(ceqe, CEQE_CQN);
5990
5991 hns_roce_cq_completion(hr_dev, cqn);
5992
5993 ++eq->cons_index;
5994 ceqe_found = IRQ_HANDLED;
5995
5996 ceqe = next_ceqe_sw_v2(eq);
5997 }
5998
5999 update_eq_db(eq);
6000
6001 return IRQ_RETVAL(ceqe_found);
6002 }
6003
6004 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
6005 {
6006 struct hns_roce_eq *eq = eq_ptr;
6007 struct hns_roce_dev *hr_dev = eq->hr_dev;
6008 irqreturn_t int_work;
6009
6010 if (eq->type_flag == HNS_ROCE_CEQ)
6011 		/* Completion event interrupt */
6012 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
6013 else
6014 		/* Asynchronous event interrupt */
6015 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
6016
6017 return IRQ_RETVAL(int_work);
6018 }
6019
6020 static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
6021 u32 int_st)
6022 {
6023 struct pci_dev *pdev = hr_dev->pci_dev;
6024 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
6025 const struct hnae3_ae_ops *ops = ae_dev->ops;
6026 irqreturn_t int_work = IRQ_NONE;
6027 u32 int_en;
6028
6029 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
6030
6031 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
6032 dev_err(hr_dev->dev, "AEQ overflow!\n");
6033
6034 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
6035 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
6036
6037 		/* Set the reset level before triggering reset_event() */
6038 if (ops->set_default_reset_request)
6039 ops->set_default_reset_request(ae_dev,
6040 HNAE3_FUNC_RESET);
6041 if (ops->reset_event)
6042 ops->reset_event(pdev, NULL);
6043
6044 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
6045 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
6046
6047 int_work = IRQ_HANDLED;
6048 } else {
6049 		dev_err(hr_dev->dev, "no basic abnormal irq source found.\n");
6050 }
6051
6052 return IRQ_RETVAL(int_work);
6053 }
6054
6055 static int fmea_ram_ecc_query(struct hns_roce_dev *hr_dev,
6056 struct fmea_ram_ecc *ecc_info)
6057 {
6058 struct hns_roce_cmq_desc desc;
6059 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6060 int ret;
6061
6062 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_QUERY_RAM_ECC, true);
6063 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6064 if (ret)
6065 return ret;
6066
6067 ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR);
6068 ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE);
6069 ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG);
6070
6071 return 0;
6072 }
6073
6074 static int fmea_recover_gmv(struct hns_roce_dev *hr_dev, u32 idx)
6075 {
6076 struct hns_roce_cmq_desc desc;
6077 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
6078 u32 addr_upper;
6079 u32 addr_low;
6080 int ret;
6081
6082 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, true);
6083 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6084
6085 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
6086 if (ret) {
6087 dev_err(hr_dev->dev,
6088 "failed to execute cmd to read gmv, ret = %d.\n", ret);
6089 return ret;
6090 }
6091
6092 addr_low = hr_reg_read(req, CFG_GMV_BT_BA_L);
6093 addr_upper = hr_reg_read(req, CFG_GMV_BT_BA_H);
6094
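	/* Write the base address just read back to the same GMV BT entry */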
6095 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
6096 hr_reg_write(req, CFG_GMV_BT_BA_L, addr_low);
6097 hr_reg_write(req, CFG_GMV_BT_BA_H, addr_upper);
6098 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
6099
6100 return hns_roce_cmq_send(hr_dev, &desc, 1);
6101 }
6102
6103 static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
6104 {
6105 if (res_type == ECC_RESOURCE_QPC_TIMER ||
6106 res_type == ECC_RESOURCE_CQC_TIMER ||
6107 res_type == ECC_RESOURCE_SCCC)
6108 return le64_to_cpu(*data);
6109
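	/* The remaining resources record a page index, so convert it to a
	 * byte address.
	 */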
6110 return le64_to_cpu(*data) << PAGE_SHIFT;
6111 }
6112
6113 static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
6114 u32 index)
6115 {
6116 u8 write_bt0_op = fmea_ram_res[res_type].write_bt0_op;
6117 u8 read_bt0_op = fmea_ram_res[res_type].read_bt0_op;
6118 struct hns_roce_cmd_mailbox *mailbox;
6119 u64 addr;
6120 int ret;
6121
6122 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6123 if (IS_ERR(mailbox))
6124 return PTR_ERR(mailbox);
6125
6126 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index);
6127 if (ret) {
6128 dev_err(hr_dev->dev,
6129 "failed to execute cmd to read fmea ram, ret = %d.\n",
6130 ret);
6131 goto out;
6132 }
6133
6134 addr = fmea_get_ram_res_addr(res_type, mailbox->buf);
6135
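	/* Rewrite the BT0 base address read above to the same entry */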
6136 ret = hns_roce_cmd_mbox(hr_dev, addr, 0, write_bt0_op, index);
6137 if (ret)
6138 dev_err(hr_dev->dev,
6139 "failed to execute cmd to write fmea ram, ret = %d.\n",
6140 ret);
6141
6142 out:
6143 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6144 return ret;
6145 }
6146
6147 static void fmea_ram_ecc_recover(struct hns_roce_dev *hr_dev,
6148 struct fmea_ram_ecc *ecc_info)
6149 {
6150 u32 res_type = ecc_info->res_type;
6151 u32 index = ecc_info->index;
6152 int ret;
6153
6154 BUILD_BUG_ON(ARRAY_SIZE(fmea_ram_res) != ECC_RESOURCE_COUNT);
6155
6156 if (res_type >= ECC_RESOURCE_COUNT) {
6157 dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n",
6158 res_type);
6159 return;
6160 }
6161
6162 if (res_type == ECC_RESOURCE_GMV)
6163 ret = fmea_recover_gmv(hr_dev, index);
6164 else
6165 ret = fmea_recover_others(hr_dev, res_type, index);
6166 if (ret)
6167 dev_err(hr_dev->dev,
6168 "failed to recover %s, index = %u, ret = %d.\n",
6169 fmea_ram_res[res_type].name, index, ret);
6170 }
6171
6172 static void fmea_ram_ecc_work(struct work_struct *ecc_work)
6173 {
6174 struct hns_roce_dev *hr_dev =
6175 container_of(ecc_work, struct hns_roce_dev, ecc_work);
6176 struct fmea_ram_ecc ecc_info = {};
6177
6178 if (fmea_ram_ecc_query(hr_dev, &ecc_info)) {
6179 dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n");
6180 return;
6181 }
6182
6183 if (!ecc_info.is_ecc_err) {
6184 		dev_err(hr_dev->dev, "no fmea ram ecc error found.\n");
6185 return;
6186 }
6187
6188 fmea_ram_ecc_recover(hr_dev, &ecc_info);
6189 }
6190
6191 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
6192 {
6193 struct hns_roce_dev *hr_dev = dev_id;
6194 irqreturn_t int_work = IRQ_NONE;
6195 u32 int_st;
6196
6197 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
6198
6199 if (int_st) {
6200 int_work = abnormal_interrupt_basic(hr_dev, int_st);
6201 } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
6202 queue_work(hr_dev->irq_workq, &hr_dev->ecc_work);
6203 int_work = IRQ_HANDLED;
6204 } else {
6205 		dev_err(hr_dev->dev, "no abnormal irq source found.\n");
6206 }
6207
6208 return IRQ_RETVAL(int_work);
6209 }
6210
6211 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
6212 int eq_num, u32 enable_flag)
6213 {
6214 int i;
6215
6216 for (i = 0; i < eq_num; i++)
6217 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
6218 i * EQ_REG_OFFSET, enable_flag);
6219
6220 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
6221 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
6222 }
6223
6224 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
6225 {
6226 struct device *dev = hr_dev->dev;
6227 int ret;
6228 u8 cmd;
6229
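	/* CEQs occupy the low EQ numbers, AEQs follow them */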
6230 if (eqn < hr_dev->caps.num_comp_vectors)
6231 cmd = HNS_ROCE_CMD_DESTROY_CEQC;
6232 else
6233 cmd = HNS_ROCE_CMD_DESTROY_AEQC;
6234
6235 ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M);
6236 if (ret)
6237 dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
6238 }
6239
6240 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
6241 {
6242 hns_roce_mtr_destroy(hr_dev, &eq->mtr);
6243 }
6244
6245 static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
6246 {
6247 eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
6248 eq->cons_index = 0;
6249 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
6250 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
6251 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
6252 eq->shift = ilog2((unsigned int)eq->entries);
6253 }
6254
6255 static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
6256 void *mb_buf)
6257 {
6258 u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
6259 struct hns_roce_eq_context *eqc;
6260 u64 bt_ba = 0;
6261 int count;
6262
6263 eqc = mb_buf;
6264 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
6265
6266 init_eq_config(hr_dev, eq);
6267
6268 	/* Get the EQE BT base address and the first EQE page addresses */
6269 count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
6270 &bt_ba);
6271 if (count < 1) {
6272 dev_err(hr_dev->dev, "failed to find EQE mtr\n");
6273 return -ENOBUFS;
6274 }
6275
6276 hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
6277 hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
6278 hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
6279 hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
6280 hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
6281 hr_reg_write(eqc, EQC_EQN, eq->eqn);
6282 hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
6283 hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
6284 to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
6285 hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
6286 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
6287 hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
6288 hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
6289
6290 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
6291 if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
6292 dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
6293 eq->eq_period);
6294 eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
6295 }
6296 eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
6297 }
6298
6299 hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
6300 hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
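	/* The EQE BT base address is stored in 8-byte units, split into the
	 * low and high fields below.
	 */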
6301 hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
6302 hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
6303 hr_reg_write(eqc, EQC_SHIFT, eq->shift);
6304 hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
6305 hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
6306 hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
6307 hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
6308 hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
6309 hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
6310 hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
6311 hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);
6312
6313 return 0;
6314 }
6315
6316 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
6317 {
6318 struct hns_roce_buf_attr buf_attr = {};
6319 int err;
6320
6321 if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
6322 eq->hop_num = 0;
6323 else
6324 eq->hop_num = hr_dev->caps.eqe_hop_num;
6325
6326 buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
6327 buf_attr.region[0].size = eq->entries * eq->eqe_size;
6328 buf_attr.region[0].hopnum = eq->hop_num;
6329 buf_attr.region_count = 1;
6330
6331 err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
6332 hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
6333 0);
6334 if (err)
6335 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
6336
6337 return err;
6338 }
6339
6340 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
6341 struct hns_roce_eq *eq, u8 eq_cmd)
6342 {
6343 struct hns_roce_cmd_mailbox *mailbox;
6344 int ret;
6345
6346 	/* Allocate mailbox memory for the EQ context */
6347 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6348 if (IS_ERR(mailbox))
6349 return PTR_ERR(mailbox);
6350
6351 ret = alloc_eq_buf(hr_dev, eq);
6352 if (ret)
6353 goto free_cmd_mbox;
6354
6355 ret = config_eqc(hr_dev, eq, mailbox->buf);
6356 if (ret)
6357 goto err_cmd_mbox;
6358
6359 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn);
6360 if (ret) {
6361 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
6362 goto err_cmd_mbox;
6363 }
6364
6365 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6366
6367 return 0;
6368
6369 err_cmd_mbox:
6370 free_eq_buf(hr_dev, eq);
6371
6372 free_cmd_mbox:
6373 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6374
6375 return ret;
6376 }
6377
6378 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
6379 int comp_num, int aeq_num, int other_num)
6380 {
6381 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6382 int i, j;
6383 int ret;
6384
6385 for (i = 0; i < irq_num; i++) {
6386 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
6387 GFP_KERNEL);
6388 if (!hr_dev->irq_names[i]) {
6389 ret = -ENOMEM;
6390 goto err_kzalloc_failed;
6391 }
6392 }
6393
6394 	/* irq names are ordered: abnormal first, then AEQ, then CEQ */
6395 for (j = 0; j < other_num; j++)
6396 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6397 "hns-abn-%d", j);
6398
6399 for (j = other_num; j < (other_num + aeq_num); j++)
6400 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6401 "hns-aeq-%d", j - other_num);
6402
6403 for (j = (other_num + aeq_num); j < irq_num; j++)
6404 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6405 "hns-ceq-%d", j - other_num - aeq_num);
6406
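	/* eq_table->eq[] holds CEQs first and then AEQs, while irq_names[] is
	 * ordered abnormal, AEQ, CEQ; the index arithmetic below maps between
	 * the two orderings.
	 */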
6407 for (j = 0; j < irq_num; j++) {
6408 if (j < other_num)
6409 ret = request_irq(hr_dev->irq[j],
6410 hns_roce_v2_msix_interrupt_abn,
6411 0, hr_dev->irq_names[j], hr_dev);
6412
6413 else if (j < (other_num + comp_num))
6414 ret = request_irq(eq_table->eq[j - other_num].irq,
6415 hns_roce_v2_msix_interrupt_eq,
6416 0, hr_dev->irq_names[j + aeq_num],
6417 &eq_table->eq[j - other_num]);
6418 else
6419 ret = request_irq(eq_table->eq[j - other_num].irq,
6420 hns_roce_v2_msix_interrupt_eq,
6421 0, hr_dev->irq_names[j - comp_num],
6422 &eq_table->eq[j - other_num]);
6423 if (ret) {
6424 dev_err(hr_dev->dev, "Request irq error!\n");
6425 goto err_request_failed;
6426 }
6427 }
6428
6429 return 0;
6430
6431 err_request_failed:
6432 for (j -= 1; j >= 0; j--)
6433 if (j < other_num)
6434 free_irq(hr_dev->irq[j], hr_dev);
6435 else
6436 free_irq(eq_table->eq[j - other_num].irq,
6437 &eq_table->eq[j - other_num]);
6438
6439 err_kzalloc_failed:
6440 for (i -= 1; i >= 0; i--)
6441 kfree(hr_dev->irq_names[i]);
6442
6443 return ret;
6444 }
6445
6446 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
6447 {
6448 int irq_num;
6449 int eq_num;
6450 int i;
6451
6452 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6453 irq_num = eq_num + hr_dev->caps.num_other_vectors;
6454
6455 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6456 free_irq(hr_dev->irq[i], hr_dev);
6457
6458 for (i = 0; i < eq_num; i++)
6459 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6460
6461 for (i = 0; i < irq_num; i++)
6462 kfree(hr_dev->irq_names[i]);
6463 }
6464
6465 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6466 {
6467 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6468 struct device *dev = hr_dev->dev;
6469 struct hns_roce_eq *eq;
6470 int other_num;
6471 int comp_num;
6472 int aeq_num;
6473 int irq_num;
6474 int eq_num;
6475 u8 eq_cmd;
6476 int ret;
6477 int i;
6478
6479 other_num = hr_dev->caps.num_other_vectors;
6480 comp_num = hr_dev->caps.num_comp_vectors;
6481 aeq_num = hr_dev->caps.num_aeq_vectors;
6482
6483 eq_num = comp_num + aeq_num;
6484 irq_num = eq_num + other_num;
6485
6486 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6487 if (!eq_table->eq)
6488 return -ENOMEM;
6489
6490 	/* create eq */
6491 for (i = 0; i < eq_num; i++) {
6492 eq = &eq_table->eq[i];
6493 eq->hr_dev = hr_dev;
6494 eq->eqn = i;
6495 if (i < comp_num) {
6496 			/* CEQ */
6497 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6498 eq->type_flag = HNS_ROCE_CEQ;
6499 eq->entries = hr_dev->caps.ceqe_depth;
6500 eq->eqe_size = hr_dev->caps.ceqe_size;
6501 eq->irq = hr_dev->irq[i + other_num + aeq_num];
6502 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6503 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6504 } else {
6505 			/* AEQ */
6506 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6507 eq->type_flag = HNS_ROCE_AEQ;
6508 eq->entries = hr_dev->caps.aeqe_depth;
6509 eq->eqe_size = hr_dev->caps.aeqe_size;
6510 eq->irq = hr_dev->irq[i - comp_num + other_num];
6511 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6512 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6513 }
6514
6515 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6516 if (ret) {
6517 dev_err(dev, "failed to create eq.\n");
6518 goto err_create_eq_fail;
6519 }
6520 }
6521
6522 INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work);
6523
6524 hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6525 if (!hr_dev->irq_workq) {
6526 dev_err(dev, "failed to create irq workqueue.\n");
6527 ret = -ENOMEM;
6528 goto err_create_eq_fail;
6529 }
6530
6531 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
6532 other_num);
6533 if (ret) {
6534 dev_err(dev, "failed to request irq.\n");
6535 goto err_request_irq_fail;
6536 }
6537
6538 	/* enable irq */
6539 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6540
6541 return 0;
6542
6543 err_request_irq_fail:
6544 destroy_workqueue(hr_dev->irq_workq);
6545
6546 err_create_eq_fail:
6547 for (i -= 1; i >= 0; i--)
6548 free_eq_buf(hr_dev, &eq_table->eq[i]);
6549 kfree(eq_table->eq);
6550
6551 return ret;
6552 }
6553
6554 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6555 {
6556 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6557 int eq_num;
6558 int i;
6559
6560 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6561
6562 	/* Disable irq */
6563 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6564
6565 __hns_roce_free_irq(hr_dev);
6566 destroy_workqueue(hr_dev->irq_workq);
6567
6568 for (i = 0; i < eq_num; i++) {
6569 hns_roce_v2_destroy_eqc(hr_dev, i);
6570
6571 free_eq_buf(hr_dev, &eq_table->eq[i]);
6572 }
6573
6574 kfree(eq_table->eq);
6575 }
6576
6577 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6578 .query_cqc_info = hns_roce_v2_query_cqc_info,
6579 };
6580
6581 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6582 .destroy_qp = hns_roce_v2_destroy_qp,
6583 .modify_cq = hns_roce_v2_modify_cq,
6584 .poll_cq = hns_roce_v2_poll_cq,
6585 .post_recv = hns_roce_v2_post_recv,
6586 .post_send = hns_roce_v2_post_send,
6587 .query_qp = hns_roce_v2_query_qp,
6588 .req_notify_cq = hns_roce_v2_req_notify_cq,
6589 };
6590
6591 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6592 .modify_srq = hns_roce_v2_modify_srq,
6593 .post_srq_recv = hns_roce_v2_post_srq_recv,
6594 .query_srq = hns_roce_v2_query_srq,
6595 };
6596
6597 static const struct hns_roce_hw hns_roce_hw_v2 = {
6598 .cmq_init = hns_roce_v2_cmq_init,
6599 .cmq_exit = hns_roce_v2_cmq_exit,
6600 .hw_profile = hns_roce_v2_profile,
6601 .hw_init = hns_roce_v2_init,
6602 .hw_exit = hns_roce_v2_exit,
6603 .post_mbox = v2_post_mbox,
6604 .poll_mbox_done = v2_poll_mbox_done,
6605 .chk_mbox_avail = v2_chk_mbox_is_avail,
6606 .set_gid = hns_roce_v2_set_gid,
6607 .set_mac = hns_roce_v2_set_mac,
6608 .write_mtpt = hns_roce_v2_write_mtpt,
6609 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6610 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6611 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6612 .write_cqc = hns_roce_v2_write_cqc,
6613 .set_hem = hns_roce_v2_set_hem,
6614 .clear_hem = hns_roce_v2_clear_hem,
6615 .modify_qp = hns_roce_v2_modify_qp,
6616 .dereg_mr = hns_roce_v2_dereg_mr,
6617 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6618 .init_eq = hns_roce_v2_init_eq_table,
6619 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6620 .write_srqc = hns_roce_v2_write_srqc,
6621 .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6622 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6623 };
6624
6625 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6626 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6627 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6628 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6629 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6630 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6631 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
6632 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
6633 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
6634 	/* required last entry */
6635 {0, }
6636 };
6637
6638 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6639
6640 static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6641 struct hnae3_handle *handle)
6642 {
6643 struct hns_roce_v2_priv *priv = hr_dev->priv;
6644 const struct pci_device_id *id;
6645 int i;
6646
6647 hr_dev->pci_dev = handle->pdev;
6648 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
6649 hr_dev->is_vf = id->driver_data;
6650 hr_dev->dev = &handle->pdev->dev;
6651 hr_dev->hw = &hns_roce_hw_v2;
6652 hr_dev->dfx = &hns_roce_dfx_hw_v2;
6653 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6654 hr_dev->odb_offset = hr_dev->sdb_offset;
6655
6656 	/* Get info from the NIC driver. */
6657 hr_dev->reg_base = handle->rinfo.roce_io_base;
6658 hr_dev->mem_base = handle->rinfo.roce_mem_base;
6659 hr_dev->caps.num_ports = 1;
6660 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6661 hr_dev->iboe.phy_port[0] = 0;
6662
6663 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6664 hr_dev->iboe.netdevs[0]->dev_addr);
6665
6666 for (i = 0; i < handle->rinfo.num_vectors; i++)
6667 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6668 i + handle->rinfo.base_vector);
6669
6670 	/* cmd issue mode: 0 is poll, 1 is event */
6671 hr_dev->cmd_mod = 1;
6672 hr_dev->loop_idc = 0;
6673
6674 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6675 priv->handle = handle;
6676 }
6677
6678 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6679 {
6680 struct hns_roce_dev *hr_dev;
6681 int ret;
6682
6683 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6684 if (!hr_dev)
6685 return -ENOMEM;
6686
6687 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6688 if (!hr_dev->priv) {
6689 ret = -ENOMEM;
6690 goto error_failed_kzalloc;
6691 }
6692
6693 hns_roce_hw_v2_get_cfg(hr_dev, handle);
6694
6695 ret = hns_roce_init(hr_dev);
6696 if (ret) {
6697 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6698 goto error_failed_cfg;
6699 }
6700
6701 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
6702 ret = free_mr_init(hr_dev);
6703 if (ret) {
6704 dev_err(hr_dev->dev, "failed to init free mr!\n");
6705 goto error_failed_roce_init;
6706 }
6707 }
6708
6709 handle->priv = hr_dev;
6710
6711 return 0;
6712
6713 error_failed_roce_init:
6714 hns_roce_exit(hr_dev);
6715
6716 error_failed_cfg:
6717 kfree(hr_dev->priv);
6718
6719 error_failed_kzalloc:
6720 ib_dealloc_device(&hr_dev->ib_dev);
6721
6722 return ret;
6723 }
6724
6725 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6726 bool reset)
6727 {
6728 struct hns_roce_dev *hr_dev = handle->priv;
6729
6730 if (!hr_dev)
6731 return;
6732
6733 handle->priv = NULL;
6734
6735 hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
6736 hns_roce_handle_device_err(hr_dev);
6737
6738 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
6739 free_mr_exit(hr_dev);
6740
6741 hns_roce_exit(hr_dev);
6742 kfree(hr_dev->priv);
6743 ib_dealloc_device(&hr_dev->ib_dev);
6744 }
6745
6746 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6747 {
6748 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6749 const struct pci_device_id *id;
6750 struct device *dev = &handle->pdev->dev;
6751 int ret;
6752
6753 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6754
6755 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6756 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6757 goto reset_chk_err;
6758 }
6759
6760 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6761 if (!id)
6762 return 0;
6763
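	/* driver_data is set only for the VF entry in the PCI table above;
	 * RoCE is not initialized on HIP08 VFs.
	 */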
6764 if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
6765 return 0;
6766
6767 ret = __hns_roce_hw_v2_init_instance(handle);
6768 if (ret) {
6769 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6770 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6771 if (ops->ae_dev_resetting(handle) ||
6772 ops->get_hw_reset_stat(handle))
6773 goto reset_chk_err;
6774 else
6775 return ret;
6776 }
6777
6778 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6779
6780 return 0;
6781
6782 reset_chk_err:
6783 	dev_err(dev, "Device is busy in resetting state.\n"
6784 		"Please retry later.\n");
6785
6786 return -EBUSY;
6787 }
6788
6789 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6790 bool reset)
6791 {
6792 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6793 return;
6794
6795 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6796
6797 __hns_roce_hw_v2_uninit_instance(handle, reset);
6798
6799 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6800 }

6801 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6802 {
6803 struct hns_roce_dev *hr_dev;
6804
6805 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6806 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6807 return 0;
6808 }
6809
6810 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6811 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6812
6813 hr_dev = handle->priv;
6814 if (!hr_dev)
6815 return 0;
6816
6817 hr_dev->active = false;
6818 hr_dev->dis_db = true;
6819 hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
6820
6821 return 0;
6822 }
6823
6824 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6825 {
6826 struct device *dev = &handle->pdev->dev;
6827 int ret;
6828
6829 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6830 &handle->rinfo.state)) {
6831 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6832 return 0;
6833 }
6834
6835 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6836
6837 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6838 ret = __hns_roce_hw_v2_init_instance(handle);
6839 if (ret) {
6840 		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
6841 		 * engine is reinitialized in this callback. If the reinit
6842 		 * fails, clear handle->priv so the NIC driver is informed of
6843 		 * the failure.
6844 		 */
6844 handle->priv = NULL;
6845 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6846 } else {
6847 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6848 dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6849 }
6850
6851 return ret;
6852 }
6853
6854 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6855 {
6856 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6857 return 0;
6858
6859 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6860 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6861 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6862 __hns_roce_hw_v2_uninit_instance(handle, false);
6863
6864 return 0;
6865 }
6866
6867 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6868 enum hnae3_reset_notify_type type)
6869 {
6870 int ret = 0;
6871
6872 switch (type) {
6873 case HNAE3_DOWN_CLIENT:
6874 ret = hns_roce_hw_v2_reset_notify_down(handle);
6875 break;
6876 case HNAE3_INIT_CLIENT:
6877 ret = hns_roce_hw_v2_reset_notify_init(handle);
6878 break;
6879 case HNAE3_UNINIT_CLIENT:
6880 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6881 break;
6882 default:
6883 break;
6884 }
6885
6886 return ret;
6887 }
6888
6889 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6890 .init_instance = hns_roce_hw_v2_init_instance,
6891 .uninit_instance = hns_roce_hw_v2_uninit_instance,
6892 .reset_notify = hns_roce_hw_v2_reset_notify,
6893 };
6894
6895 static struct hnae3_client hns_roce_hw_v2_client = {
6896 .name = "hns_roce_hw_v2",
6897 .type = HNAE3_CLIENT_ROCE,
6898 .ops = &hns_roce_hw_v2_ops,
6899 };
6900
6901 static int __init hns_roce_hw_v2_init(void)
6902 {
6903 return hnae3_register_client(&hns_roce_hw_v2_client);
6904 }
6905
6906 static void __exit hns_roce_hw_v2_exit(void)
6907 {
6908 hnae3_unregister_client(&hns_roce_hw_v2_client);
6909 }
6910
6911 module_init(hns_roce_hw_v2_init);
6912 module_exit(hns_roce_hw_v2_exit);
6913
6914 MODULE_LICENSE("Dual BSD/GPL");
6915 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6916 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6917 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6918 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");