/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H

#include <linux/circ_buf.h>
#include "common.h"

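/* Circular-buffer index arithmetic; the size must be a power of two. */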
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)

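/* TID RDMA segment size limits; both are currently 256 KiB (BIT(18)). */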
#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18)
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18)
#define TID_RDMA_MAX_PAGES        (BIT(18) >> PAGE_SHIFT)
#define TID_RDMA_SEGMENT_SHIFT    18
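/*
 * Bit definitions for priv->s_flags.
 * These bits overload the bit flags defined for the QP's s_flags.
 * Since they are used only in the QP priv s_flags, they can safely
 * overlap the QP's own s_flags; the gaps in the bit numbering below
 * are positions already used by the generic RVT_S_ and HFI1_S_ bits.
 */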
#define HFI1_S_TID_BUSY_SET      BIT(0)
#define HFI1_R_TID_RSC_TIMER     BIT(2)
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_R_TID_WAIT_INTERLCK BIT(6)
#define HFI1_S_TID_RETRY_TIMER   BIT(17)
#define HFI1_R_TID_SW_PSN        BIT(19)
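/*
 * Unlike regular RDMA WRITE requests, TID RDMA WRITE requests generate
 * responses and therefore need entries in the s_ack_queue. The queue is
 * extended by this many entries, invisibly to the user of the queue.
 */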
#define HFI1_TID_RDMA_WRITE_CNT 8

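/* TID RDMA parameters, negotiated with the peer (see tid_rdma_opfn_init()) */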
struct tid_rdma_params {
        struct rcu_head rcu_head;
        u32 qp;
        u32 max_len;
        u16 jkey;
        u8 max_read;
        u8 max_write;
        u8 timeout;
        u8 urg;
        u8 version;
};

struct tid_rdma_qp_params {
        struct work_struct trigger_work;
        struct tid_rdma_params local;
        struct tid_rdma_params __rcu *remote;
};

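/* Track state for each hardware flow */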
struct tid_flow_state {
        u32 generation;
        u32 psn;
        u8 index;
        u8 last_index;
};

enum tid_rdma_req_state {
        TID_REQUEST_INACTIVE = 0,
        TID_REQUEST_INIT,
        TID_REQUEST_INIT_RESEND,
        TID_REQUEST_ACTIVE,
        TID_REQUEST_RESEND,
        TID_REQUEST_RESEND_ACTIVE,
        TID_REQUEST_QUEUED,
        TID_REQUEST_SYNC,
        TID_REQUEST_RNR_NAK,
        TID_REQUEST_COMPLETE,
};

struct tid_rdma_request {
        struct rvt_qp *qp;
        struct hfi1_ctxtdata *rcd;
        union {
                struct rvt_swqe *swqe;
                struct rvt_ack_entry *ack;
        } e;

        struct tid_rdma_flow *flows;  /* array of tid flows */
        struct rvt_sge_state ss;      /* SGE state for TID RDMA requests */
        u16 n_flows;                  /* size of the flow circular buffer */
        u16 setup_head;               /* flow being set up */
        u16 clear_tail;               /* flow being cleared */
        u16 flow_idx;                 /* flow index most recently set up */
        u16 acked_tail;

        u32 seg_len;                  /* segment length for this request (in bytes) */
        u32 total_len;                /* total length of the request (in bytes) */
        u32 r_ack_psn;                /* next expected ack PSN */
        u32 r_flow_psn;               /* IB PSN of next segment start */
        u32 r_last_acked;             /* IB PSN of last ACK'ed packet */
        u32 s_next_psn;               /* IB PSN of next segment start for read */

        u32 total_segs;               /* segments required to complete a request */
        u32 cur_seg;                  /* index of current segment */
        u32 comp_seg;                 /* index of last completed segment */
        u32 ack_seg;                  /* index of last acked segment */
        u32 alloc_seg;                /* index of next segment to be allocated */
        u32 isge;                     /* index of "current" sge */
        u32 ack_pending;              /* number of acks pending for this request */

        enum tid_rdma_req_state state;
};

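/*
 * When header suppression is used, it is the PSNs associated with a
 * "flow" that are relevant, not the PSNs maintained by verbs. Track
 * them here for a TID RDMA segment.
 */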
struct flow_state {
        u32 flags;
        u32 resp_ib_psn;     /* IB PSN of the response for this flow */
        u32 generation;      /* generation of the flow */
        u32 spsn;            /* starting PSN in TID space */
        u32 lpsn;            /* last PSN in TID space */
        u32 r_next_psn;      /* next PSN to be received (in TID space) */

        /* For TID RDMA READ only */
        u32 ib_spsn;         /* starting PSN in verbs space */
        u32 ib_lpsn;         /* last PSN in verbs space */
};

struct tid_rdma_pageset {
        dma_addr_t addr : 48;   /* Only needed for the first page */
        u8 idx : 8;
        u8 count : 7;
        u8 mapped : 1;
};
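/**
 * struct kern_tid_node - used for managing TIDs in TID groups
 * @grp: the TID group
 * @map: grp->map captured prior to programming this TID group in HW
 * @cnt: number of the group's entries that are actually programmed
 */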
struct kern_tid_node {
        struct tid_group *grp;
        u8 map;
        u8 cnt;
};

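/* Overall info for a TID RDMA segment */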
struct tid_rdma_flow {
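        /*
         * While a TID RDMA segment is being transferred, it uses a QP number
         * from the "KDETH section" of QP numbers, which is different from
         * the QP number used for verbs traffic.
         */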
        struct flow_state flow_state;
        struct tid_rdma_request *req;
        u32 tid_qpn;
        u32 tid_offset;
        u32 length;
        u32 sent;
        u8 tnode_cnt;
        u8 tidcnt;
        u8 tid_idx;
        u8 idx;
        u8 npagesets;
        u8 npkts;
        u8 pkt;
        u8 resync_npkts;
        struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
        struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
        u32 tid_entry[TID_RDMA_MAX_PAGES];
};

enum tid_rnr_nak_state {
        TID_RNR_NAK_INIT = 0,
        TID_RNR_NAK_SEND,
        TID_RNR_NAK_SENT,
};

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);

int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
                            struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
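/*
 * trdma_clean_swqe - clean up the TID RDMA resources of a send wqe, if any
 * @qp: the qp
 * @wqe: the send wqe
 */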
static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        if (!wqe->priv)
                return;
        __trdma_clean_swqe(qp, wqe);
}

void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);

int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                      struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);

int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);

struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
                            void *context, int vl, int mode, u64 data);

u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
                                    struct ib_other_headers *ohdr,
                                    u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                 struct ib_other_headers *ohdr, u32 *bth1,
                                 u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
                                  struct ib_other_headers *ohdr, u32 *bth0,
                                  u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                              struct hfi1_pportdata *ppd,
                              struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
                               u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);

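/*
 * Only RDMA READ and RDMA WRITE wqes of at least
 * TID_RDMA_MIN_SEGMENT_SIZE bytes are set up as TID RDMA.
 */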
void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
                                           struct rvt_swqe *wqe)
{
        if (wqe->priv &&
            (wqe->wr.opcode == IB_WR_RDMA_READ ||
             wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
            wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
                setup_tid_rdma_wqe(qp, wqe);
}

u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                  struct ib_other_headers *ohdr,
                                  u32 *bth1, u32 *bth2, u32 *len);

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
                                   struct ib_other_headers *ohdr, u32 *bth1,
                                   u32 bth2, u32 *len,
                                   struct rvt_sge_state **ss);

void hfi1_del_tid_reap_timer(struct rvt_qp *qp);

void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
                                struct ib_other_headers *ohdr,
                                u32 *bth1, u32 *bth2, u32 *len);

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
                                  struct ib_other_headers *ohdr, u16 iflow,
                                  u32 *bth1, u32 *bth2);

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);

void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);

u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
                               struct ib_other_headers *ohdr, u32 *bth1,
                               u32 *bth2, u16 fidx);

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);

struct hfi1_pkt_state;
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void _hfi1_do_tid_send(struct work_struct *work);

bool hfi1_schedule_tid_send(struct rvt_qp *qp);

bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);

#endif /* HFI1_TID_RDMA_H */