// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2021, Alibaba Group */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"

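/*
 * Called when the lower-layer (TCP) connection of a QP goes away: move
 * active states towards CLOSING, complete an in-progress close to IDLE,
 * and drop the QP's reference to its connection endpoint.
 */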
void erdma_qp_llp_close(struct erdma_qp *qp)
{
    struct erdma_qp_attrs qp_attrs;

    down_write(&qp->state_lock);

    switch (qp->attrs.state) {
    case ERDMA_QP_STATE_RTS:
    case ERDMA_QP_STATE_RTR:
    case ERDMA_QP_STATE_IDLE:
    case ERDMA_QP_STATE_TERMINATE:
        qp_attrs.state = ERDMA_QP_STATE_CLOSING;
        erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
        break;
    case ERDMA_QP_STATE_CLOSING:
        qp->attrs.state = ERDMA_QP_STATE_IDLE;
        break;
    default:
        break;
    }

    if (qp->cep) {
        erdma_cep_put(qp->cep);
        qp->cep = NULL;
    }

    up_write(&qp->state_lock);
}

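/* Look up a QP by number and return its ib_qp, or NULL if none exists. */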
struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
{
    struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);

    if (qp)
        return &qp->ibqp;

    return NULL;
}

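/*
 * Transition a connected QP to RTS: gather the TCP 4-tuple and the
 * send/receive sequence numbers from the connection socket and hand them
 * to the device via a MODIFY_QP command, so the established connection
 * can be driven by hardware from here on.
 */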
static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
                    struct erdma_qp_attrs *attrs,
                    enum erdma_qp_attr_mask mask)
{
    int ret;
    struct erdma_dev *dev = qp->dev;
    struct erdma_cmdq_modify_qp_req req;
    struct tcp_sock *tp;
    struct erdma_cep *cep = qp->cep;
    struct sockaddr_storage local_addr, remote_addr;

    if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
        return -EINVAL;

    if (!(mask & ERDMA_QP_ATTR_MPA))
        return -EINVAL;

    ret = getname_local(cep->sock, &local_addr);
    if (ret < 0)
        return ret;

    ret = getname_peer(cep->sock, &remote_addr);
    if (ret < 0)
        return ret;

    qp->attrs.state = ERDMA_QP_STATE_RTS;

    tp = tcp_sk(qp->cep->sock->sk);

    erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                CMDQ_OPCODE_MODIFY_QP);

    req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
          FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
          FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

    req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
    req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
    req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
    req.dport = to_sockaddr_in(remote_addr).sin_port;
    req.sport = to_sockaddr_in(local_addr).sin_port;

    req.send_nxt = tp->snd_nxt;
    /* Reserve TCP sequence space for the MPA response on the passive (server) side. */
    if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
        req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
    req.recv_nxt = tp->rcv_nxt;

    return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
                   NULL);
}

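/*
 * Move a QP into a stopped state (CLOSING, TERMINATE or ERROR) with a
 * MODIFY_QP command that carries only the new state and the QP number.
 */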
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
                     struct erdma_qp_attrs *attrs,
                     enum erdma_qp_attr_mask mask)
{
    struct erdma_dev *dev = qp->dev;
    struct erdma_cmdq_modify_qp_req req;

    qp->attrs.state = attrs->state;

    erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                CMDQ_OPCODE_MODIFY_QP);

    req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
          FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

    return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
                   NULL);
}

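/*
 * QP state machine. Only ERDMA_QP_ATTR_STATE transitions are handled;
 * leaving RTS for CLOSING/TERMINATE/ERROR also drops the connection via
 * erdma_qp_cm_drop(). Callers serialize state changes through
 * qp->state_lock (see erdma_qp_llp_close()).
 */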
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
                 enum erdma_qp_attr_mask mask)
{
    int drop_conn, ret = 0;

    if (!mask)
        return 0;

    if (!(mask & ERDMA_QP_ATTR_STATE))
        return 0;

    switch (qp->attrs.state) {
    case ERDMA_QP_STATE_IDLE:
    case ERDMA_QP_STATE_RTR:
        if (attrs->state == ERDMA_QP_STATE_RTS) {
            ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
        } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
            qp->attrs.state = ERDMA_QP_STATE_ERROR;
            if (qp->cep) {
                erdma_cep_put(qp->cep);
                qp->cep = NULL;
            }
            ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
        }
        break;
    case ERDMA_QP_STATE_RTS:
        drop_conn = 0;

        if (attrs->state == ERDMA_QP_STATE_CLOSING) {
            ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
            drop_conn = 1;
        } else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
            qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
            ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
            drop_conn = 1;
        } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
            ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
            qp->attrs.state = ERDMA_QP_STATE_ERROR;
            drop_conn = 1;
        }

        if (drop_conn)
            erdma_qp_cm_drop(qp);

        break;
    case ERDMA_QP_STATE_TERMINATE:
        if (attrs->state == ERDMA_QP_STATE_ERROR)
            qp->attrs.state = ERDMA_QP_STATE_ERROR;
        break;
    case ERDMA_QP_STATE_CLOSING:
        if (attrs->state == ERDMA_QP_STATE_IDLE) {
            qp->attrs.state = ERDMA_QP_STATE_IDLE;
        } else if (attrs->state == ERDMA_QP_STATE_ERROR) {
            ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
            qp->attrs.state = ERDMA_QP_STATE_ERROR;
        } else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
            return -ECONNABORTED;
        }
        break;
    default:
        break;
    }

    return ret;
}

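/*
 * QP reference counting: the final erdma_qp_put() completes qp->safe_free
 * so that the destroy path can wait for all remaining users.
 */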
static void erdma_qp_safe_free(struct kref *ref)
{
    struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);

    complete(&qp->safe_free);
}

void erdma_qp_put(struct erdma_qp *qp)
{
    WARN_ON(kref_read(&qp->ref) < 1);
    kref_put(&qp->ref, erdma_qp_safe_free);
}

void erdma_qp_get(struct erdma_qp *qp)
{
    kref_get(&qp->ref);
}

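/*
 * Copy the payload of an IB_SEND_INLINE work request directly into the
 * SQE ring, spilling across SQEBB-sized slots as needed. Returns the
 * number of bytes copied, or -EINVAL if it exceeds ERDMA_MAX_INLINE.
 */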
static int fill_inline_data(struct erdma_qp *qp,
                const struct ib_send_wr *send_wr, u16 wqe_idx,
                u32 sgl_offset, __le32 *length_field)
{
    u32 remain_size, copy_size, data_off, bytes = 0;
    char *data;
    int i = 0;

    wqe_idx += (sgl_offset >> SQEBB_SHIFT);
    sgl_offset &= (SQEBB_SIZE - 1);
    data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
                   SQEBB_SHIFT);

    while (i < send_wr->num_sge) {
        bytes += send_wr->sg_list[i].length;
        if (bytes > (int)ERDMA_MAX_INLINE)
            return -EINVAL;

        remain_size = send_wr->sg_list[i].length;
        data_off = 0;

        while (1) {
            copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);

            memcpy(data + sgl_offset,
                   (void *)(uintptr_t)send_wr->sg_list[i].addr +
                       data_off,
                   copy_size);
            remain_size -= copy_size;
            data_off += copy_size;
            sgl_offset += copy_size;
            wqe_idx += (sgl_offset >> SQEBB_SHIFT);
            sgl_offset &= (SQEBB_SIZE - 1);

            data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
                           qp->attrs.sq_size, SQEBB_SHIFT);
            if (!remain_size)
                break;
        }

        i++;
    }
    *length_field = cpu_to_le32(bytes);

    return bytes;
}

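/*
 * Copy the work request's scatter/gather entries verbatim into the SQE
 * ring and report the total data length. The SGL area must start 16-byte
 * aligned and may not exceed the device's max_send_sge limit.
 */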
static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
            u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
{
    int i = 0;
    u32 bytes = 0;
    char *sgl;

    if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
        return -EINVAL;

    if (sgl_offset & 0xF)
        return -EINVAL;

    while (i < send_wr->num_sge) {
        wqe_idx += (sgl_offset >> SQEBB_SHIFT);
        sgl_offset &= (SQEBB_SIZE - 1);
        sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
                      qp->attrs.sq_size, SQEBB_SHIFT);

        bytes += send_wr->sg_list[i].length;
        memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
               sizeof(struct ib_sge));

        sgl_offset += sizeof(struct ib_sge);
        i++;
    }

    *length_field = cpu_to_le32(bytes);
    return 0;
}

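/*
 * Build one send queue entry at the current producer index: the common
 * header (completion/solicited/fence/inline flags, QPN, opcode) goes into
 * wqe_hdr, followed by opcode-specific fields and the SGL or inline
 * payload. *pi is advanced by the number of SQEBBs consumed.
 */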
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
                  const struct ib_send_wr *send_wr)
{
    u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
    u32 idx = *pi & (qp->attrs.sq_size - 1);
    enum ib_wr_opcode op = send_wr->opcode;
    struct erdma_readreq_sqe *read_sqe;
    struct erdma_reg_mr_sqe *regmr_sge;
    struct erdma_write_sqe *write_sqe;
    struct erdma_send_sqe *send_sqe;
    struct ib_rdma_wr *rdma_wr;
    struct erdma_mr *mr;
    __le32 *length_field;
    u64 wqe_hdr, *entry;
    struct ib_sge *sge;
    u32 attrs;
    int ret;

    entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
                SQEBB_SHIFT);

    /* Clear the SQE header section. */
    *entry = 0;

    qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
    flags = send_wr->send_flags;
    wqe_hdr = FIELD_PREP(
        ERDMA_SQE_HDR_CE_MASK,
        ((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
                  flags & IB_SEND_SOLICITED ? 1 : 0);
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
                  flags & IB_SEND_FENCE ? 1 : 0);
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
                  flags & IB_SEND_INLINE ? 1 : 0);
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));

    switch (op) {
    case IB_WR_RDMA_WRITE:
    case IB_WR_RDMA_WRITE_WITH_IMM:
        hw_op = ERDMA_OP_WRITE;
        if (op == IB_WR_RDMA_WRITE_WITH_IMM)
            hw_op = ERDMA_OP_WRITE_WITH_IMM;
        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
        rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
        write_sqe = (struct erdma_write_sqe *)entry;

        write_sqe->imm_data = send_wr->ex.imm_data;
        write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
        write_sqe->sink_to_h =
            cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
        write_sqe->sink_to_l =
            cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));

        length_field = &write_sqe->length;
        wqe_size = sizeof(struct erdma_write_sqe);
        sgl_offset = wqe_size;
        break;
    case IB_WR_RDMA_READ:
    case IB_WR_RDMA_READ_WITH_INV:
        read_sqe = (struct erdma_readreq_sqe *)entry;
        if (unlikely(send_wr->num_sge != 1))
            return -EINVAL;
        hw_op = ERDMA_OP_READ;
        if (op == IB_WR_RDMA_READ_WITH_INV) {
            hw_op = ERDMA_OP_READ_WITH_INV;
            read_sqe->invalid_stag =
                cpu_to_le32(send_wr->ex.invalidate_rkey);
        }

        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
        rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
        read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
        read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
        read_sqe->sink_to_l =
            cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
        read_sqe->sink_to_h =
            cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));

        sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
                      qp->attrs.sq_size, SQEBB_SHIFT);
        sge->addr = rdma_wr->remote_addr;
        sge->lkey = rdma_wr->rkey;
        sge->length = send_wr->sg_list[0].length;
        wqe_size = sizeof(struct erdma_readreq_sqe) +
               send_wr->num_sge * sizeof(struct ib_sge);

        goto out;
    case IB_WR_SEND:
    case IB_WR_SEND_WITH_IMM:
    case IB_WR_SEND_WITH_INV:
        send_sqe = (struct erdma_send_sqe *)entry;
        hw_op = ERDMA_OP_SEND;
        if (op == IB_WR_SEND_WITH_IMM) {
            hw_op = ERDMA_OP_SEND_WITH_IMM;
            send_sqe->imm_data = send_wr->ex.imm_data;
        } else if (op == IB_WR_SEND_WITH_INV) {
            hw_op = ERDMA_OP_SEND_WITH_INV;
            send_sqe->invalid_stag =
                cpu_to_le32(send_wr->ex.invalidate_rkey);
        }
        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
        length_field = &send_sqe->length;
        wqe_size = sizeof(struct erdma_send_sqe);
        sgl_offset = wqe_size;

        break;
    case IB_WR_REG_MR:
        wqe_hdr |=
            FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
        regmr_sge = (struct erdma_reg_mr_sqe *)entry;
        mr = to_emr(reg_wr(send_wr)->mr);

        mr->access = ERDMA_MR_ACC_LR |
                 to_erdma_access_flags(reg_wr(send_wr)->access);
        regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
        regmr_sge->length = cpu_to_le32(mr->ibmr.length);
        regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
        attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
            FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
            FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
                   mr->mem.mtt_nents);

        if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
            attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
            /* Copy the MTT entries into the SQE itself to accelerate registration. */
            memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
                           qp->attrs.sq_size, SQEBB_SHIFT),
                   mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
            wqe_size = sizeof(struct erdma_reg_mr_sqe) +
                   MTT_SIZE(mr->mem.mtt_nents);
        } else {
            attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
            wqe_size = sizeof(struct erdma_reg_mr_sqe);
        }

        regmr_sge->attrs = cpu_to_le32(attrs);
        goto out;
    case IB_WR_LOCAL_INV:
        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
                      ERDMA_OP_LOCAL_INV);
        regmr_sge = (struct erdma_reg_mr_sqe *)entry;
        regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
        wqe_size = sizeof(struct erdma_reg_mr_sqe);
        goto out;
    default:
        return -EOPNOTSUPP;
    }

    if (flags & IB_SEND_INLINE) {
        ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
                       length_field);
        if (ret < 0)
            return -EINVAL;
        wqe_size += ret;
        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
    } else {
        ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
        if (ret)
            return -EINVAL;
        wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
                      send_wr->num_sge);
    }

out:
    wqebb_cnt = SQEBB_COUNT(wqe_size);
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
    *pi += wqebb_cnt;
    wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);

    *entry = wqe_hdr;

    return 0;
}

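/*
 * Publish the new SQ producer index: update the doorbell record in host
 * memory, then ring the hardware SQ doorbell.
 */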
static void kick_sq_db(struct erdma_qp *qp, u16 pi)
{
    u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
              FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);

    *(u64 *)qp->kern_qp.sq_db_info = db_data;
    writeq(db_data, qp->kern_qp.hw_sq_db);
}

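/*
 * Post a chain of send work requests. sq_pi and sq_ci are free-running
 * 16-bit counters, so (u16)(sq_pi - sq_ci) is the number of WQEBBs in
 * flight; posting stops with -ENOMEM once that reaches sq_size. On any
 * failure *bad_send_wr is set for the caller.
 */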
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
            const struct ib_send_wr **bad_send_wr)
{
    struct erdma_qp *qp = to_eqp(ibqp);
    int ret = 0;
    const struct ib_send_wr *wr = send_wr;
    unsigned long flags;
    u16 sq_pi;

    if (!send_wr)
        return -EINVAL;

    spin_lock_irqsave(&qp->lock, flags);
    sq_pi = qp->kern_qp.sq_pi;

    while (wr) {
        if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
            ret = -ENOMEM;
            *bad_send_wr = send_wr;
            break;
        }

        ret = erdma_push_one_sqe(qp, &sq_pi, wr);
        if (ret) {
            *bad_send_wr = wr;
            break;
        }
        qp->kern_qp.sq_pi = sq_pi;
        kick_sq_db(qp, sq_pi);

        wr = wr->next;
    }
    spin_unlock_irqrestore(&qp->lock, flags);

    return ret;
}

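/*
 * Build a single RQE (at most one SGE is supported), publish it through
 * the RQ doorbell record and hardware doorbell, remember the wr_id for
 * completion handling, and advance the RQ producer index.
 */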
static int erdma_post_recv_one(struct erdma_qp *qp,
                   const struct ib_recv_wr *recv_wr)
{
    struct erdma_rqe *rqe =
        get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
                qp->attrs.rq_size, RQE_SHIFT);

    rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
    rqe->qpn = cpu_to_le32(QP_ID(qp));

    if (recv_wr->num_sge == 0) {
        rqe->length = 0;
    } else if (recv_wr->num_sge == 1) {
        rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
        rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
        rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
    } else {
        return -EINVAL;
    }

    *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
    writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);

    qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
        recv_wr->wr_id;
    qp->kern_qp.rq_pi++;

    return 0;
}

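/* Post a chain of receive work requests; *bad_recv_wr is set on failure. */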
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
            const struct ib_recv_wr **bad_recv_wr)
{
    const struct ib_recv_wr *wr = recv_wr;
    struct erdma_qp *qp = to_eqp(ibqp);
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&qp->lock, flags);

    while (wr) {
        ret = erdma_post_recv_one(qp, wr);
        if (ret) {
            *bad_recv_wr = wr;
            break;
        }
        wr = wr->next;
    }

    spin_unlock_irqrestore(&qp->lock, flags);
    return ret;
}