// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
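/*
 * hfi1_ruc_check_hdr - validate a received packet header against its QP
 *
 * Checks the GRH/GIDs, PKEY, SLID, and port against either the alternate
 * path (migrating the QP when an armed alternate path matches) or the
 * primary path.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 *
 * Return: 1 if the header fails the checks, 0 otherwise.
 */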
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	bool migrated = packet->migrated;
	u16 pkey = packet->pkey;

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}

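		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */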
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}

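		/* Validate the SLID. See Ch. 9.6.1.5 */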
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}
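/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: size in dwords of the header that follows the GRH
 * @nwords: the number of 32-bit words of data being sent
 *
 * Return: the size of the header in 32-bit words.
 */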
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
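	/* next_hdr is defined by C8-7 in ch. 8.4.1 */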
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
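	/* The SGID is 32-bit aligned. */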
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

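	/* GRH header size in 32-bit words */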
	return sizeof(struct ib_grh) / sizeof(u32);
}

#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)
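/**
 * build_ahg - set up the AHG (Adaptive Header Generation) fields in s_ahg
 * @qp: a pointer to the QP
 * @npsn: the next PSN for the request/response
 *
 * The first middle packet allocates an AHG entry and requests a copy of
 * the header; subsequent middles reuse the copied entry, editing only
 * the PSN with one or two AHG edits.
 */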
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
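		/* first middle that needs a copy */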
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
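			/* save to protect a change in another thread */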
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= HFI1_S_AHG_VALID;
		}
	} else {
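		/* subsequent middle after valid */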
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 bth1, u32 bth2)
{
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
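/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates AHG "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm AHG when:
 * - the packet needs a GRH
 * - a BECN is needed
 * - the migration state is not IB_MIG_MIGRATED
 */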
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth1, u32 bth2,
					    int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes = hfi1_get_16b_padding(
				(ps->s_txreq->hdr_dwords << 2),
				ps->s_txreq->s_cur_size);
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes + SIZE_OF_LT) >> 2);
	bool becn = false;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
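		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */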
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, grd,
				      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
				      nwords);
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
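		/* we recently received a FECN, so return a BECN */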
		becn = true;
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			 ((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (ps->s_txreq->hdr_dwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}
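/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates AHG "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm AHG when:
 * - the packet needs a GRH
 * - a BECN is needed
 * - the migration state is not IB_MIG_MIGRATED
 */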
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth1, u32 bth2,
					   int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				     extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

		lrh0 = HFI1_LRH_GRH;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
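		/* we recently received a FECN, so return a BECN */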
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 ps->s_txreq->hdr_dwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd_from_ibp(ibp)->lid |
				rdma_ah_get_path_bits(&qp->remote_ah_attr));
}

typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth1, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

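/* We support only two header types - 9B and 16B - for now */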
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth1, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;

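	/*
	 * Reset the s_ahg/AHG fields.
	 *
	 * This ensures that ahgcount/ahgidx are at a non-AHG
	 * default, which protects build_verbs_tx_desc() from
	 * using a stale ahgidx.
	 *
	 * build_ahg() will modify them as appropriate to use
	 * the AHG feature.
	 */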
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;

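	/* Make the appropriate header */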
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
					    ps);
}
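/* when sending, force a reschedule every one of these periods */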
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
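/**
 * hfi1_schedule_send_yield - test whether the QP send engine must yield
 * @qp: a pointer to the QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 * @tid: true if this is the TID leg of the send engine
 *
 * Checks whether the time slice for the QP has expired. If so, and the
 * send cannot simply continue in-thread, an additional work entry is
 * queued so that other QPs get an opportunity to be scheduled.
 *
 * Return: true if a yield is required, otherwise false.
 */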
bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			      bool tid)
{
	ps->pkts_sent = true;

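	/* Time slice has expired */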
	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			if (!tid) {
				qp->s_flags &= ~RVT_S_BUSY;
				hfi1_schedule_send(qp);
			} else {
				struct hfi1_qp_priv *priv = qp->priv;

				if (priv->s_flags &
				    HFI1_S_TID_BUSY_SET) {
					qp->s_flags &= ~RVT_S_BUSY;
					priv->s_flags &=
						~(HFI1_S_TID_BUSY_SET |
						  RVT_S_BUSY);
				} else {
					priv->s_flags &= ~RVT_S_BUSY;
				}
				hfi1_schedule_tid_send(qp);
			}

			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}

		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
	struct rvt_qp *qp = iowait_to_qp(w->iow);

	hfi1_do_send(qp, true);
}
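/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if called from a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */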
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.in_thread = in_thread;
	ps.wait = iowait_get_ib_work(&priv->s_iowait);

	trace_hfi1_rc_do_send(qp, in_thread);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			rvt_ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		ps.timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			rvt_ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

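	/* Return if we are already busy processing a work request. */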
	if (!hfi1_send_ok(qp)) {
		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	ps.timeout_int = ps.timeout_int / 8;
	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

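	/* ensure a pre-built packet is handled */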
	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
	do {
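		/* Check for a constructed packet to be sent. */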
		if (ps.s_txreq) {
			if (priv->s_flags & HFI1_S_TID_BUSY_SET)
				qp->s_flags |= RVT_S_BUSY;
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
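			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */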
			if (hfi1_verbs_send(qp, &ps))
				return;

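			/* allow other tasks to run */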
			if (hfi1_schedule_send_yield(qp, &ps, false))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}