#include <rdma/ib_verbs.h>

#include "erdma_hw.h"
#include "erdma_verbs.h"

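/*
 * Return the CQE at the current consumer index if the device has
 * finished writing it, or NULL if the CQ is empty. Validity is
 * tracked by an owner bit in the CQE header that flips on every pass
 * through the ring: the CQE is valid when its owner bit differs from
 * the parity of the current pass (ci & depth).
 */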
static void *get_next_valid_cqe(struct erdma_cq *cq)
{
	__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
				      cq->depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
			      __be32_to_cpu(READ_ONCE(*cqe)));

	return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
}

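/*
 * Arm the CQ: compose the doorbell value, mirror it into the
 * host-memory doorbell record, then ring the hardware doorbell via
 * MMIO so the device has both copies of the latest armed state.
 */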
static void notify_cq(struct erdma_cq *cq, u8 solicited)
{
	u64 db_data =
		FIELD_PREP(ERDMA_CQDB_IDX_MASK, cq->kern_cq.notify_cnt) |
		FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
		FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
		FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
		FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);

	*cq->kern_cq.db_record = db_data;
	writeq(db_data, cq->kern_cq.db);
}

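/*
 * req_notify_cq verb: arm the CQ for the next completion, solicited
 * only if IB_CQ_SOLICITED was requested. When the caller passes
 * IB_CQ_REPORT_MISSED_EVENTS and a valid CQE is already queued,
 * return 1 so it knows to poll again instead of waiting for an event.
 */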
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);

	notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
		ret = 1;

	cq->kern_cq.notify_cnt++;

	spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);

	return ret;
}

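/* Translation of device CQE opcodes to IB work completion opcodes. */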
static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
	[ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_READ] = IB_WC_RDMA_READ,
	[ERDMA_OP_SEND] = IB_WC_SEND,
	[ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[ERDMA_OP_RECEIVE] = IB_WC_RECV,
	[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
	[ERDMA_OP_RECV_INV] = IB_WC_RECV,
	[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
	[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
	[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
	[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
	[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
	[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
};

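/*
 * Translation of device CQE syndromes, indexed by syndrome value, to
 * the generic IB completion status plus a device-specific vendor error.
 */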
static const struct {
	enum erdma_wc_status erdma;
	enum ib_wc_status base;
	enum erdma_vendor_err vendor;
} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
	{ ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_INVALID_RQE },
	{ ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_STAG },
	{ ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
	{ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
	{ ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_PD },
	{ ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_WRAP_ERR },
	{ ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
	  ERDMA_WC_VENDOR_INVALID_SQE },
	{ ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_ZERO_ORD },
	{ ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_STAG },
	{ ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
	{ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
	{ ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_PD },
	{ ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_WARP_ERR },
	{ ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};

#define ERDMA_POLLCQ_NO_QP 1

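/*
 * Consume one CQE and fill in @wc. Returns 0 on success, -EAGAIN if
 * the CQ is empty, or ERDMA_POLLCQ_NO_QP if the CQE references a QP
 * that no longer exists (the CQE is consumed, but no work completion
 * is reported for it).
 */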
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);
	u8 opcode, syndrome, qtype;
	struct erdma_kqp *kern_qp;
	struct erdma_cqe *cqe;
	struct erdma_qp *qp;
	u16 wqe_idx, depth;
	u32 qpn, cqe_hdr;
	u64 *id_table;
	u64 *wqe_hdr;

	cqe = get_next_valid_cqe(cq);
	if (!cqe)
		return -EAGAIN;

	cq->kern_cq.ci++;

	/*
	 * Order the ownership check in get_next_valid_cqe() before the
	 * reads of the remaining CQE fields written by the device.
	 */
	dma_rmb();

	qpn = be32_to_cpu(cqe->qpn);
	wqe_idx = be32_to_cpu(cqe->qe_idx);
	cqe_hdr = be32_to_cpu(cqe->hdr);

	qp = find_qp_by_qpn(dev, qpn);
	if (!qp)
		return ERDMA_POLLCQ_NO_QP;

	kern_qp = &qp->kern_qp;

	qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
	syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
	opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);

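	/*
	 * For SQ completions, advance the SQ consumer index past the
	 * completed WQE, whose size in WQE basic blocks is encoded in
	 * its header. For both queue types the wr_id is recovered from
	 * the per-queue table filled in at post time.
	 */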
	if (qtype == ERDMA_CQE_QTYPE_SQ) {
		id_table = kern_qp->swr_tbl;
		depth = qp->attrs.sq_size;
		wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					  qp->attrs.sq_size, SQEBB_SHIFT);
		kern_qp->sq_ci =
			FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
			wqe_idx + 1;
	} else {
		id_table = kern_qp->rwr_tbl;
		depth = qp->attrs.rq_size;
	}

	wc->wr_id = id_table[wqe_idx & (depth - 1)];
	wc->byte_len = be32_to_cpu(cqe->size);

	wc->wc_flags = 0;

	wc->opcode = wc_mapping_table[opcode];
	if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
		wc->wc_flags |= IB_WC_WITH_IMM;
	} else if (opcode == ERDMA_OP_RECV_INV) {
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}

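	/* Report unknown hardware syndromes as a generic error. */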
	if (syndrome >= ERDMA_NUM_WC_STATUS)
		syndrome = ERDMA_WC_GENERAL_ERR;

	wc->status = map_cqe_status[syndrome].base;
	wc->vendor_err = map_cqe_status[syndrome].vendor;
	wc->qp = &qp->ibqp;

	return 0;
}

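/*
 * poll_cq verb: reap up to @num_entries completions into @wc under the
 * CQ lock. CQEs whose QP has already been destroyed are consumed
 * silently; polling stops early once the CQ is drained.
 */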
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long flags;
	int npolled, ret;

	spin_lock_irqsave(&cq->kern_cq.lock, flags);

	for (npolled = 0; npolled < num_entries;) {
		ret = erdma_poll_one_cqe(cq, wc + npolled);

		if (ret == -EAGAIN)
			break;
		else if (ret)
			continue;

		npolled++;
	}

	spin_unlock_irqrestore(&cq->kern_cq.lock, flags);

	return npolled;
}