#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_ATOMIC_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_INVALIDATE_RKEY,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE] = "NONE",
	[RESPST_GET_REQ] = "GET_REQ",
	[RESPST_CHK_PSN] = "CHK_PSN",
	[RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
	[RESPST_CHK_LENGTH] = "CHK_LENGTH",
	[RESPST_CHK_RKEY] = "CHK_RKEY",
	[RESPST_EXECUTE] = "EXECUTE",
	[RESPST_READ_REPLY] = "READ_REPLY",
	[RESPST_ATOMIC_REPLY] = "ATOMIC_REPLY",
	[RESPST_COMPLETE] = "COMPLETE",
	[RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
	[RESPST_CLEANUP] = "CLEANUP",
	[RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR] = "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
	[RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH] = "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
	[RESPST_ERROR] = "ERROR",
	[RESPST_RESET] = "RESET",
	[RESPST_DONE] = "DONE",
	[RESPST_EXIT] = "EXIT",
};

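/* queue an inbound request packet for the responder; each queued skb holds
 * a reference on the qp and its ib_device that is dropped when the packet
 * is dequeued and freed (see cleanup() and rxe_drain_req_pkts())
 */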
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
		     (skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

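/* peek at the next inbound request packet; in the error state drop all
 * queued packets and go flush the receive work queue instead
 */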
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_put(qp);
			kfree_skb(skb);
			ib_device_put(qp->ibqp.device);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

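/* dequeue the next wqe from the shared receive queue, keeping a private
 * copy in qp->resp.srq_wqe, and raise the SRQ limit event if the queue
 * has drained below the armed limit
 */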
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;
	unsigned int count;
	size_t size;
	unsigned long flags;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_irqsave(&srq->rq.consumer_lock, flags);

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	if (!wqe) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		return RESPST_ERR_RNR;
	}

	/* don't trust the num_sge value coming from user space */
	if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
		spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
		return RESPST_ERR_MALFORMED_WQE;
	}
	size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
	memcpy(&qp->resp.srq_wqe, wqe, size);

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

	if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue,
						  QUEUE_TYPE_FROM_CLIENT);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
		/* it is the requester's job not to send too many
		 * read/atomic ops; the responder just recycles its
		 * resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue,
					  QUEUE_TYPE_FROM_CLIENT);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mr *mr = NULL;
	struct rxe_mw *mw = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.offset = 0;
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.offset = 0;
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* a zero-byte read or write does not need a valid addr or rkey */
	if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va = qp->resp.va;
	rkey = qp->resp.rkey;
	resid = qp->resp.resid;
	pktlen = payload_size(pkt);

	if (rkey_is_mw(rkey)) {
		mw = rxe_lookup_mw(qp, access, rkey);
		if (!mw) {
			pr_debug("%s: no MW matches rkey %#x\n",
				 __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		mr = mw->mr;
		if (!mr) {
			pr_err("%s: MW doesn't have an MR\n", __func__);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}

		if (mw->access & IB_ZERO_BASED)
			qp->resp.offset = mw->addr;

		rxe_put(mw);
		rxe_get(mr);
	} else {
		mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
		if (!mr) {
			pr_debug("%s: no MR matches rkey %#x\n",
				 __func__, rkey);
			state = RESPST_ERR_RKEY_VIOLATION;
			goto err;
		}
	}

	if (mr_check_range(mr, va + qp->resp.offset, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if (bth_pad(pkt) != (0x3 & (-resid))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mr;
	return RESPST_EXECUTE;

err:
	if (mr)
		rxe_put(mr);
	if (mw)
		rxe_put(mw);

	return state;
}

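/* copy an inbound payload (or the GRH for UD qps) into the memory
 * described by the current receive wqe's sge list
 */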
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, RXE_TO_MR_OBJ);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
			  payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

static struct resp_res *rxe_prepare_res(struct rxe_qp *qp,
					struct rxe_pkt_info *pkt,
					int type)
{
	struct resp_res *res;
	u32 pkts;

	res = &qp->resp.resources[qp->resp.res_head];
	rxe_advance_resp_resource(qp);
	free_rd_atomic_resource(res);

	res->type = type;
	res->replay = 0;

	switch (type) {
	case RXE_READ_MASK:
		res->read.va = qp->resp.va + qp->resp.offset;
		res->read.va_org = qp->resp.va + qp->resp.offset;
		res->read.resid = qp->resp.resid;
		res->read.length = qp->resp.resid;
		res->read.rkey = qp->resp.rkey;

		pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
		res->first_psn = pkt->psn;
		res->cur_psn = pkt->psn;
		res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;

		res->state = rdatm_res_state_new;
		break;
	case RXE_ATOMIC_MASK:
		res->first_psn = pkt->psn;
		res->last_psn = pkt->psn;
		res->cur_psn = pkt->psn;
		break;
	}

	return res;
}

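/* serialize atomic operations so that compare&swap and fetch&add appear
 * atomic to all requesters using this driver
 */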
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states atomic_reply(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mr *mr = qp->resp.mr;
	struct resp_res *res = qp->resp.res;
	u64 value;

	if (!res) {
		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
		qp->resp.res = res;
	}

	if (!res->replay) {
		if (mr->state != RXE_MR_STATE_VALID) {
			ret = RESPST_ERR_RKEY_VIOLATION;
			goto out;
		}

		vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
				      sizeof(u64));

		/* check vaddr is 8 bytes aligned. */
		if (!vaddr || (uintptr_t)vaddr & 7) {
			ret = RESPST_ERR_MISALIGNED_ATOMIC;
			goto out;
		}

		spin_lock_bh(&atomic_ops_lock);
		res->atomic.orig_val = value = *vaddr;

		if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			if (value == atmeth_comp(pkt))
				value = atmeth_swap_add(pkt);
		} else {
			value += atmeth_swap_add(pkt);
		}

		*vaddr = value;
		spin_unlock_bh(&atomic_ops_lock);

		qp->resp.msn++;

		/* next expected psn, read handles this separately */
		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
		qp->resp.ack_psn = qp->resp.psn;

		qp->resp.opcode = pkt->opcode;
		qp->resp.status = IB_WC_SUCCESS;
	}

	ret = RESPST_ACKNOWLEDGE;
out:
	return ret;
}

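/* build an ACK, NAK, read response or atomic ACK packet addressed back to
 * the requester on this qp
 */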
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->paylen = paylen;
	ack->psn = psn;

	bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
		 qp->attr.dest_qp_num, 0, psn);

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.res->atomic.orig_val);

	err = rxe_prepare(&qp->pri_av, ack, skb);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
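/**
 * rxe_recheck_mr - revalidate the MR behind an rkey and take a reference
 * @qp: the qp
 * @rkey: the rkey presented by the requester
 *
 * The MR (or the MW it was bound through) may have been invalidated or
 * deregistered since the read resource was created, so look it up again
 * and take a fresh reference before building the reply.
 *
 * Return: mr on success else NULL
 */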
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	struct rxe_mw *mw;

	if (rkey_is_mw(rkey)) {
		mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
		if (!mw)
			return NULL;

		mr = mw->mr;
		if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
		    !mr || mr->state != RXE_MR_STATE_VALID) {
			rxe_put(mw);
			return NULL;
		}

		rxe_get(mr);
		rxe_put(mw);

		return mr;
	}

	mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
	if (!mr)
		return NULL;

	if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
		rxe_put(mr);
		return NULL;
	}

	return mr;
}
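/* RDMA read response. If res is not NULL, then we have a current RDMA
 * request being processed or replayed.
 */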
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	struct rxe_mr *mr;

	if (!res) {
		res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
		qp->resp.res = res;
	}

	if (res->state == rdatm_res_state_new) {
		if (!res->replay) {
			mr = qp->resp.mr;
			qp->resp.mr = NULL;
		} else {
			mr = rxe_recheck_mr(qp, res->read.rkey);
			if (!mr)
				return RESPST_ERR_RKEY_VIOLATION;
		}

		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		mr = rxe_recheck_mr(qp, res->read.rkey);
		if (!mr)
			return RESPST_ERR_RKEY_VIOLATION;

		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
			  payload, RXE_FROM_MR_OBJ);
	if (err)
		pr_err("Failed copying memory\n");
	if (mr)
		rxe_put(mr);

	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
	if (rkey_is_mw(rkey))
		return rxe_invalidate_mw(qp, rkey);
	else
		return rxe_invalidate_mr(qp, rkey);
}

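/* Executes a new request. A retried request never reaches this function:
 * duplicate sends and writes are discarded, and duplicate reads and atomics
 * are replayed from duplicate_request().
 */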
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;
	struct sk_buff *skb = PKT_TO_SKB(pkt);
	union rdma_network_hdr hdr;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_GSI) {
			if (skb->protocol == htons(ETH_P_IP)) {
				memset(&hdr.reserved, 0,
				       sizeof(hdr.reserved));
				memcpy(&hdr.roce4grh, ip_hdr(skb),
				       sizeof(hdr.roce4grh));
				err = send_data_in(qp, &hdr, sizeof(hdr));
			} else {
				err = send_data_in(qp, ipv6_hdr(skb),
						   sizeof(hdr));
			}
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* for RDMA Read we can increment the msn now */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		return RESPST_ATOMIC_REPLY;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	if (pkt->mask & RXE_IETH_MASK) {
		u32 rkey = ieth_rkey(pkt);

		err = invalidate_rkey(qp, rkey);
		if (err)
			return RESPST_ERR_INVALIDATE_RKEY;
	}

	if (pkt->mask & RXE_END_MASK)
		/* We successfully processed this new request. */
		qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		goto finish;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status = qp->resp.status;
		uwc->qp_num = qp->ibqp.qp_num;
		uwc->wr_id = wqe->wr_id;
	} else {
		wc->status = qp->resp.status;
		wc->qp = &qp->ibqp;
		wc->wr_id = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
			      pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num = qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num = qp->attr.port_num;
		}
	}

	/* have a private copy for srq and a reference for !srq */
	if (!qp->srq)
		queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

finish:
	if (unlikely(qp->resp.state == QP_STATE_ERROR))
		return RESPST_CHK_RESOURCE;
	if (unlikely(!pkt))
		return RESPST_DONE;
	if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}

static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
				 0, psn, syndrome);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending atomic ack\n");

	/* have to clear this since a non-NULL resp.res is used to
	 * trigger continuing a multi-packet read reply
	 */
	qp->resp.res = NULL;
out:
	return err;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
	else if (bth_ack(pkt))
		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* duplicate send or write: ack again and clean up */
		send_ack(qp, AETH_ACK_UNLIMITED, prev_psn);
		return RESPST_CLEANUP;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* resource not found, drop the request */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			res->replay = 1;
			res->cur_psn = pkt->psn;
			qp->resp.res = res;
			rc = RESPST_ATOMIC_REPLY;
			goto out;
		}

		/* resource not found, drop the request */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}
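/* process a class A or C error; both are treated the same here: record a
 * NAK syndrome and completion status, then push the qp towards ERROR
 */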
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome = syndrome;
	qp->resp.status = status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error = 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* only reached for UC qps */
	if (qp->srq) {
		/* with an SRQ, complete the current wqe with an error */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* without an SRQ, this packet may be the start of a new,
		 * valid message; the half-received message is invalid and
		 * ignored, so reset the recv wqe to its original state
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_put(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;
	struct rxe_queue *q = qp->rq.queue;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_put(qp);
		kfree_skb(skb);
		ib_device_put(qp->ibqp.device);
	}

	if (notify)
		return;

	while (!qp->srq && q && queue_head(q, q->type))
		queue_advance_consumer(q, q->type);
}

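/* rxe_responder - the responder state machine, run from the qp's response
 * task; returns 0 when a request was handled so the task runs again, or
 * -EAGAIN to stop until the task is rescheduled
 */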
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret;

	if (!rxe_get(qp))
		return -EAGAIN;

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid)
		goto exit;

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ATOMIC_REPLY:
			state = atomic_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - NAK with the expected PSN, drop request */
			send_ack(qp, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC only - NAK and complete with an error */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - send an RNR NAK carrying min_rnr_timer */
				send_ack(qp, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - silently drop the message */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* RC - access error NAK and completion error */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC with SRQ - complete with error */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC without SRQ - just drop */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_INVALIDATE_RKEY:
			/* RC only - complete with error and go to ERROR */
			qp->resp.goto_error = 1;
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* RC - invalid request NAK and completion error */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD with SRQ - complete with an error */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD without SRQ - drop the message */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* the posted recv wqe was bad - NAK and complete
			 * with a local qp operation error
			 */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* completion queue overflow - move the qp to error */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_debug("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON_ONCE(1);
		}
	}

	/* done: a request was fully processed, return 0 so the task loop
	 * runs again; exit: nothing more to do, return -EAGAIN so the task
	 * stops until it is rescheduled
	 */
done:
	ret = 0;
	goto out;
exit:
	ret = -EAGAIN;
out:
	rxe_put(qp);
	return ret;
}