#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

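/*
 * get_sguid - return the source GUID for the given index.
 * Index 0 is the port GUID; higher indices come from the port's
 * GUID table.
 */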
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

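/*
 * gid_ok - a GID matches if its interface ID matches and its subnet
 * prefix is either the given prefix or the default GID prefix.
 */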
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * qib_ruc_check_hdr - check that the packet header matches the QP state.
 *
 * Returns 0 if the header is consistent with the primary or alternate
 * path, 1 otherwise.  This should be called with the QP r_lock held;
 * the s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

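	/* MigReq set while armed: validate against the alternate path. */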
	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID and the port against the alternate path. */
		if ((be16_to_cpu(hdr->lrh[3]) !=
		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
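		/* No migration request: validate against the primary path. */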
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID and the port against the primary path. */
		if (be16_to_cpu(hdr->lrh[3]) !=
		    rdma_ah_get_dlid(&qp->remote_ah_attr) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
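		/* A packet without MigReq completes the rearm. */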
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* IB_GRH_NEXT_HDR indicates that a BTH follows the GRH. */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* sgid_index 0 is the port GUID; others index the GUID table. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (!grh->sgid_index)
		hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
	else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
		hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
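	/*
	 * Pad the payload to a multiple of four bytes and convert the
	 * total (payload plus pad) to a count of 32-bit words.
	 */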
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		qp->s_hdrwords +=
			qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
				     rdma_ah_read_grh(&qp->remote_ah_attr),
				     qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] =
		cpu_to_be16(ppd_from_ibp(ibp)->lid |
			    rdma_ah_get_path_bits(&qp->remote_ah_attr));
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
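
/*
 * _qib_do_send - send work entry point
 * @work: the work_struct embedded in struct qib_qp_priv
 *
 * Recover the QP from its private data and hand it to qib_do_send().
 */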
void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}

/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

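	/*
	 * RC and UC packets whose DLID is this port's LID (ignoring the
	 * low LMC bits) never reach the wire; use the rdmavt loopback
	 * path instead.
	 */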
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		rvt_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send work will be rescheduled later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}