0001
0002
0003
0004
0005
0006 #include "hfi.h"
0007 #include "trace.h"
0008 #include "qp.h"
0009 #include "opfn.h"
0010
/* BTH extended bit: set by a peer that supports OPFN negotiation */
#define IB_BTHE_E BIT(IB_BTHE_E_SHIFT)

/* Map a 1-based STL_VERBS_EXTD_* capability code to its mask bit */
#define OPFN_CODE(code) BIT((code) - 1)
/* Same mapping, taking the STL_VERBS_EXTD_* suffix directly */
#define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code)
0015
/*
 * Per-capability OPFN negotiation callbacks, one set per extended verbs
 * capability (registered in hfi1_opfn_handlers[] below).
 */
struct hfi1_opfn_type {
	/* Requester: build the 64-bit request payload; returning false
	 * marks the capability completed without sending (see
	 * opfn_conn_request()).
	 */
	bool (*request)(struct rvt_qp *qp, u64 *data);
	/* Responder: consume a request and fill the reply payload; true
	 * marks the capability negotiated on this side.
	 */
	bool (*response)(struct rvt_qp *qp, u64 *data);
	/* Requester: consume the responder's reply; true marks the
	 * capability negotiated.
	 */
	bool (*reply)(struct rvt_qp *qp, u64 data);
	/* Invalidate any state previously negotiated for this capability */
	void (*error)(struct rvt_qp *qp);
};
0022
/*
 * Handler table indexed by capability code. Slot 0 (STL_VERBS_EXTD_NONE)
 * is intentionally empty; TID RDMA is currently the only capability.
 */
static struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = {
	[STL_VERBS_EXTD_TID_RDMA] = {
		.request = tid_rdma_conn_req,
		.response = tid_rdma_conn_resp,
		.reply = tid_rdma_conn_reply,
		.error = tid_rdma_conn_error,
	},
};
0031
/* Workqueue on which OPFN connection requests are (re)issued */
static struct workqueue_struct *opfn_wq;

static void opfn_schedule_conn_request(struct rvt_qp *qp);
0035
0036 static bool hfi1_opfn_extended(u32 bth1)
0037 {
0038 return !!(bth1 & IB_BTHE_E);
0039 }
0040
0041 static void opfn_conn_request(struct rvt_qp *qp)
0042 {
0043 struct hfi1_qp_priv *priv = qp->priv;
0044 struct ib_atomic_wr wr;
0045 u16 mask, capcode;
0046 struct hfi1_opfn_type *extd;
0047 u64 data;
0048 unsigned long flags;
0049 int ret = 0;
0050
0051 trace_hfi1_opfn_state_conn_request(qp);
0052 spin_lock_irqsave(&priv->opfn.lock, flags);
0053
0054
0055
0056
0057
0058 if (!priv->opfn.extended || !priv->opfn.requested ||
0059 priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
0060 goto done;
0061
0062 mask = priv->opfn.requested & ~priv->opfn.completed;
0063 capcode = ilog2(mask & ~(mask - 1)) + 1;
0064 if (capcode >= STL_VERBS_EXTD_MAX) {
0065 priv->opfn.completed |= OPFN_CODE(capcode);
0066 goto done;
0067 }
0068
0069 extd = &hfi1_opfn_handlers[capcode];
0070 if (!extd || !extd->request || !extd->request(qp, &data)) {
0071
0072
0073
0074
0075
0076 priv->opfn.completed |= OPFN_CODE(capcode);
0077 goto done;
0078 }
0079
0080 trace_hfi1_opfn_data_conn_request(qp, capcode, data);
0081 data = (data & ~0xf) | capcode;
0082
0083 memset(&wr, 0, sizeof(wr));
0084 wr.wr.opcode = IB_WR_OPFN;
0085 wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR;
0086 wr.compare_add = data;
0087
0088 priv->opfn.curr = capcode;
0089
0090 spin_unlock_irqrestore(&priv->opfn.lock, flags);
0091
0092 ret = ib_post_send(&qp->ibqp, &wr.wr, NULL);
0093 if (ret)
0094 goto err;
0095 trace_hfi1_opfn_state_conn_request(qp);
0096 return;
0097 err:
0098 trace_hfi1_msg_opfn_conn_request(qp, "ib_ost_send failed: ret = ",
0099 (u64)ret);
0100 spin_lock_irqsave(&priv->opfn.lock, flags);
0101
0102
0103
0104
0105 priv->opfn.curr = STL_VERBS_EXTD_NONE;
0106 opfn_schedule_conn_request(qp);
0107 done:
0108 spin_unlock_irqrestore(&priv->opfn.lock, flags);
0109 }
0110
0111 void opfn_send_conn_request(struct work_struct *work)
0112 {
0113 struct hfi1_opfn_data *od;
0114 struct hfi1_qp_priv *qpriv;
0115
0116 od = container_of(work, struct hfi1_opfn_data, opfn_work);
0117 qpriv = container_of(od, struct hfi1_qp_priv, opfn);
0118
0119 opfn_conn_request(qpriv->owner);
0120 }
0121
0122
0123
0124
0125
0126
0127 static void opfn_schedule_conn_request(struct rvt_qp *qp)
0128 {
0129 struct hfi1_qp_priv *priv = qp->priv;
0130
0131 trace_hfi1_opfn_state_sched_conn_request(qp);
0132 queue_work(opfn_wq, &priv->opfn.opfn_work);
0133 }
0134
/*
 * Responder-side handling of an incoming OPFN request (carried as an
 * extended atomic). Decodes the capability code from the low nibble of
 * the compare data, runs that capability's response handler, and places
 * the reply payload in the ack entry's atomic_data.
 */
void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
			struct ib_atomic_eth *ateth)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u64 data = be64_to_cpu(ateth->compare_data);
	struct hfi1_opfn_type *extd;
	u8 capcode;
	unsigned long flags;

	trace_hfi1_opfn_state_conn_response(qp);
	capcode = data & 0xf;
	trace_hfi1_opfn_data_conn_response(qp, capcode, data);
	if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
		return;

	extd = &hfi1_opfn_handlers[capcode];

	if (!extd || !extd->response) {
		/* No response handler: echo only the capability code back */
		e->atomic_data = capcode;
		return;
	}

	spin_lock_irqsave(&priv->opfn.lock, flags);
	if (priv->opfn.completed & OPFN_CODE(capcode)) {
		/*
		 * A new request arrived for a capability we had already
		 * negotiated — the remote side is renegotiating. Tear down
		 * the old state before handling the request afresh.
		 */
		priv->opfn.completed &= ~OPFN_CODE(capcode);
		if (extd->error)
			extd->error(qp);
	}

	if (extd->response(qp, &data))
		priv->opfn.completed |= OPFN_CODE(capcode);
	/* The reply keeps the capability code in the low nibble */
	e->atomic_data = (data & ~0xf) | capcode;
	trace_hfi1_opfn_state_conn_response(qp);
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
0174
/*
 * Requester-side handling of the responder's OPFN reply. The reply is
 * honored only when it matches the capability negotiation currently in
 * flight (opfn.curr).
 */
void opfn_conn_reply(struct rvt_qp *qp, u64 data)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_opfn_type *extd;
	u8 capcode;
	unsigned long flags;

	trace_hfi1_opfn_state_conn_reply(qp);
	capcode = data & 0xf;
	trace_hfi1_opfn_data_conn_reply(qp, capcode, data);
	if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
		return;

	spin_lock_irqsave(&priv->opfn.lock, flags);
	/*
	 * Ignore the reply if no negotiation is in progress, or if it is
	 * for a different capability than the one we requested.
	 */
	if (!priv->opfn.curr || capcode != priv->opfn.curr)
		goto done;

	extd = &hfi1_opfn_handlers[capcode];

	if (!extd || !extd->reply)
		goto clear;

	if (extd->reply(qp, data))
		priv->opfn.completed |= OPFN_CODE(capcode);
clear:
	/*
	 * Negotiation for this capability is finished (successfully or
	 * not); clear the in-progress marker so a new request can be
	 * issued.
	 */
	priv->opfn.curr = STL_VERBS_EXTD_NONE;
	trace_hfi1_opfn_state_conn_reply(qp);
done:
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
0213
0214 void opfn_conn_error(struct rvt_qp *qp)
0215 {
0216 struct hfi1_qp_priv *priv = qp->priv;
0217 struct hfi1_opfn_type *extd = NULL;
0218 unsigned long flags;
0219 u16 capcode;
0220
0221 trace_hfi1_opfn_state_conn_error(qp);
0222 trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state);
0223
0224
0225
0226
0227
0228 spin_lock_irqsave(&priv->opfn.lock, flags);
0229 while (priv->opfn.completed) {
0230 capcode = priv->opfn.completed & ~(priv->opfn.completed - 1);
0231 extd = &hfi1_opfn_handlers[ilog2(capcode) + 1];
0232 if (extd->error)
0233 extd->error(qp);
0234 priv->opfn.completed &= ~OPFN_CODE(capcode);
0235 }
0236 priv->opfn.extended = 0;
0237 priv->opfn.requested = 0;
0238 priv->opfn.curr = STL_VERBS_EXTD_NONE;
0239 spin_unlock_irqrestore(&priv->opfn.lock, flags);
0240 }
0241
/*
 * Apply OPFN-relevant QP attribute changes (invoked on QP modify). For
 * RC QPs with the TID_RDMA capability enabled, initialize the local TID
 * RDMA parameters and request TID RDMA negotiation when the QP reaches
 * RTS.
 */
void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;

	if (attr_mask & IB_QP_RETRY_CNT)
		priv->s_retry = attr->retry_cnt;

	spin_lock_irqsave(&priv->opfn.lock, flags);
	if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		struct tid_rdma_params *local = &priv->tid_rdma.local;

		if (attr_mask & IB_QP_TIMEOUT)
			priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
		/* Only the 4096/8192 OPA MTUs are set up for TID RDMA */
		if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
		    qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
			tid_rdma_opfn_init(qp, local);
			/*
			 * Request TID RDMA negotiation only on the
			 * transition to RTS, when the QP can send.
			 */
			if (attr_mask & IB_QP_STATE &&
			    attr->qp_state == IB_QPS_RTS) {
				priv->opfn.requested |= OPFN_MASK(TID_RDMA);
				/*
				 * If TID RDMA was already negotiated —
				 * e.g. the QP is moving to RTS again after
				 * recovery — clear the completed bit and
				 * renegotiate so both sides agree on fresh
				 * parameters.
				 */
				if (priv->opfn.completed &
				    OPFN_MASK(TID_RDMA)) {
					priv->opfn.completed &=
						~OPFN_MASK(TID_RDMA);
					/*
					 * Kick off the new negotiation via
					 * the workqueue.
					 */
					opfn_schedule_conn_request(qp);
				}
			}
		} else {
			memset(local, 0, sizeof(*local));
		}
	}
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}
0292
0293 void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
0294 {
0295 struct hfi1_qp_priv *priv = qp->priv;
0296
0297 if (!priv->opfn.extended && hfi1_opfn_extended(bth1) &&
0298 HFI1_CAP_IS_KSET(OPFN)) {
0299 priv->opfn.extended = 1;
0300 if (qp->state == IB_QPS_RTS)
0301 opfn_conn_request(qp);
0302 }
0303 }
0304
0305 int opfn_init(void)
0306 {
0307 opfn_wq = alloc_workqueue("hfi_opfn",
0308 WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
0309 WQ_MEM_RECLAIM,
0310 HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES);
0311 if (!opfn_wq)
0312 return -ENOMEM;
0313
0314 return 0;
0315 }
0316
0317 void opfn_exit(void)
0318 {
0319 if (opfn_wq) {
0320 destroy_workqueue(opfn_wq);
0321 opfn_wq = NULL;
0322 }
0323 }