0001
0002
0003
0004
0005
0006
0007 #ifndef HFI1_RC_H
0008 #define HFI1_RC_H
0009
0010
0011 #define OP(x) IB_OPCODE_RC_##x
0012
0013 static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
0014 {
0015 unsigned int next;
0016
0017 next = n + 1;
0018 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
0019 next = 0;
0020 qp->s_tail_ack_queue = next;
0021 qp->s_acked_ack_queue = next;
0022 qp->s_ack_state = OP(ACKNOWLEDGE);
0023 }
0024
0025 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
0026 struct rvt_qp *qp)
0027 {
0028 if (list_empty(&qp->rspwait)) {
0029 qp->r_flags |= RVT_R_RSP_NAK;
0030 rvt_get_qp(qp);
0031 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
0032 }
0033 }
0034
0035 static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
0036 u32 psn, u32 pmtu)
0037 {
0038 u32 len;
0039
0040 len = delta_psn(psn, wqe->psn) * pmtu;
0041 return rvt_restart_sge(ss, wqe, len);
0042 }
0043
0044 static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
0045 {
0046 if (e->rdma_sge.mr) {
0047 rvt_put_mr(e->rdma_sge.mr);
0048 e->rdma_sge.mr = NULL;
0049 }
0050 }
0051
/* NOTE(review): implemented elsewhere (rc.c); semantics below inferred from
 * names/signatures — confirm against the definitions.
 */
/* Presumably locates the ACK-queue entry preceding @psn, reporting the
 * previous indices via @prev/@prev_ack and whether a response is already
 * scheduled via @scheduled.
 */
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled);
/* Presumably processes an incoming ACK (AETH @aeth) for @psn/@opcode/@val. */
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
	      struct hfi1_ctxtdata *rcd);
/* Presumably completes send WQE @wqe and returns the next WQE to process. */
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp);
0058
0059 #endif