/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
    struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_devdata *dd = ppd->dd;
    struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
    struct rvt_qp *qp;
    struct rdma_ah_attr *ah_attr;
    unsigned long flags;
    struct rvt_sge_state ssge;
    struct rvt_sge *sge;
    struct ib_wc wc;
    u32 length;
    enum ib_qp_type sqptype, dqptype;

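    /*
     * Look up the destination QP by QPN under RCU so it cannot be
     * freed while the payload is copied into its receive buffers.
     */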
    rcu_read_lock();
    qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
    if (!qp) {
        ibp->rvp.n_pkt_drops++;
        goto drop;
    }

    sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
            IB_QPT_UD : sqp->ibqp.qp_type;
    dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
            IB_QPT_UD : qp->ibqp.qp_type;

    if (dqptype != sqptype ||
        !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
        ibp->rvp.n_pkt_drops++;
        goto drop;
    }

    ah_attr = rvt_get_swqe_ah_attr(swqe);
    ppd = ppd_from_ibp(ibp);

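    /*
     * For QPs other than QP0 and QP1, the sender's and receiver's
     * pkeys must match; on a mismatch the bad pkey is reported and
     * the packet is dropped.
     */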
    if (qp->ibqp.qp_num > 1) {
        u16 pkey1;
        u16 pkey2;
        u16 lid;

        pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
        pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
        if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
            lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
                      ((1 << ppd->lmc) - 1));
            qib_bad_pkey(ibp, pkey1,
                     rdma_ah_get_sl(ah_attr),
                     sqp->ibqp.qp_num, qp->ibqp.qp_num,
                     cpu_to_be16(lid),
                     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
            goto drop;
        }
    }

    /*
     * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
     * Qkeys with the high order bit set mean use the
     * qkey from the QP context instead of the WR (see 10.2.5).
     */
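    /* Example: a WR qkey of 0x80000000 has the high-order bit set,
     * so (int)qkey below is negative and sqp->qkey is used instead.
     */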
    if (qp->ibqp.qp_num) {
        u32 qkey;

        qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
            sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
        if (unlikely(qkey != qp->qkey))
            goto drop;
    }

    /*
     * A GRH is expected to precede the data even if not
     * present on the wire.
     */
    length = swqe->length;
    memset(&wc, 0, sizeof(wc));
    wc.byte_len = length + sizeof(struct ib_grh);
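    /* sizeof(struct ib_grh) is 40 bytes: 8 bytes of header fields
     * plus two 16-byte GIDs.
     */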

    if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
        wc.wc_flags = IB_WC_WITH_IMM;
        wc.ex.imm_data = swqe->wr.ex.imm_data;
    }

    spin_lock_irqsave(&qp->r_lock, flags);

    /*
     * Get the next work request entry to find where to put the data.
     */
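    /*
     * If the previous packet was dropped, RVT_R_REUSE_SGE is still
     * set and the same receive WQE is reused rather than consuming
     * a new one.
     */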
    if (qp->r_flags & RVT_R_REUSE_SGE)
        qp->r_flags &= ~RVT_R_REUSE_SGE;
    else {
        int ret;

        ret = rvt_get_rwqe(qp, false);
        if (ret < 0) {
            rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
            goto bail_unlock;
        }
        if (!ret) {
            if (qp->ibqp.qp_num == 0)
                ibp->rvp.n_vl15_dropped++;
            goto bail_unlock;
        }
    }
    /* Silently drop packets which are too big. */
    if (unlikely(wc.byte_len > qp->r_len)) {
        qp->r_flags |= RVT_R_REUSE_SGE;
        ibp->rvp.n_pkt_drops++;
        goto bail_unlock;
    }

    if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
        struct ib_grh grh;
        const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

        qib_make_grh(ibp, &grh, grd, 0, 0);
        rvt_copy_sge(qp, &qp->r_sge, &grh,
                 sizeof(grh), true, false);
        wc.wc_flags |= IB_WC_GRH;
    } else
        rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
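    /*
     * Copy the payload from the sender's scatter/gather list into
     * the receiver's SGE state.  A private copy (ssge) of the
     * sender's list is walked so the WQE itself is left unmodified.
     */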
    ssge.sg_list = swqe->sg_list + 1;
    ssge.sge = *swqe->sg_list;
    ssge.num_sge = swqe->wr.num_sge;
    sge = &ssge.sge;
    while (length) {
        u32 len = rvt_get_sge_length(sge, length);

        rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
        sge->vaddr += len;
        sge->length -= len;
        sge->sge_length -= len;
        if (sge->sge_length == 0) {
            if (--ssge.num_sge)
                *sge = *ssge.sg_list++;
        } else if (sge->length == 0 && sge->mr->lkey) {
            if (++sge->n >= RVT_SEGSZ) {
                if (++sge->m >= sge->mr->mapsz)
                    break;
                sge->n = 0;
            }
            sge->vaddr =
                sge->mr->map[sge->m]->segs[sge->n].vaddr;
            sge->length =
                sge->mr->map[sge->m]->segs[sge->n].length;
        }
        length -= len;
    }
    rvt_put_ss(&qp->r_sge);
    if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
        goto bail_unlock;
    wc.wr_id = qp->r_wr_id;
    wc.status = IB_WC_SUCCESS;
    wc.opcode = IB_WC_RECV;
    wc.qp = &qp->ibqp;
    wc.src_qp = sqp->ibqp.qp_num;
    wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
        rvt_get_swqe_pkey_index(swqe) : 0;
    wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
                ((1 << ppd->lmc) - 1));
    wc.sl = rdma_ah_get_sl(ah_attr);
    wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
    wc.port_num = qp->port_num;
    /* Signal completion event if the solicited bit is set. */
    rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
    ibp->rvp.n_loop_pkts++;
bail_unlock:
    spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
    rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: flags to modify and pass back to caller
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
    struct qib_qp_priv *priv = qp->priv;
    struct ib_other_headers *ohdr;
    struct rdma_ah_attr *ah_attr;
    struct qib_pportdata *ppd;
    struct qib_ibport *ibp;
    struct rvt_swqe *wqe;
    u32 nwords;
    u32 extra_bytes;
    u32 bth0;
    u16 lrh0;
    u16 lid;
    int ret = 0;
    int next_cur;

    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
        if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
            goto bail;
        /* We are in the error state, flush the work request. */
        if (qp->s_last == READ_ONCE(qp->s_head))
            goto bail;
        /* If DMAs are in progress, we can't flush immediately. */
        if (atomic_read(&priv->s_dma_busy)) {
            qp->s_flags |= RVT_S_WAIT_DMA;
            goto bail;
        }
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
        goto done;
    }

    /* see post_one_send() */
    if (qp->s_cur == READ_ONCE(qp->s_head))
        goto bail;

    wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
    next_cur = qp->s_cur + 1;
    if (next_cur >= qp->s_size)
        next_cur = 0;

    /* Construct the header. */
    ibp = to_iport(qp->ibqp.device, qp->port_num);
    ppd = ppd_from_ibp(ibp);
    ah_attr = rvt_get_swqe_ah_attr(wqe);
    if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
        if (rdma_ah_get_dlid(ah_attr) !=
                be16_to_cpu(IB_LID_PERMISSIVE))
            this_cpu_inc(ibp->pmastats->n_multicast_xmit);
        else
            this_cpu_inc(ibp->pmastats->n_unicast_xmit);
    } else {
        this_cpu_inc(ibp->pmastats->n_unicast_xmit);
        lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
        if (unlikely(lid == ppd->lid)) {
            unsigned long tflags = *flags;
            /*
             * If DMAs are in progress, we can't generate
             * a completion for the loopback packet since
             * it would be out of order.
             * XXX Instead of waiting, we could queue a
             * zero length descriptor so we get a callback.
             */
            if (atomic_read(&priv->s_dma_busy)) {
                qp->s_flags |= RVT_S_WAIT_DMA;
                goto bail;
            }
            qp->s_cur = next_cur;
            spin_unlock_irqrestore(&qp->s_lock, tflags);
            qib_ud_loopback(qp, wqe);
            spin_lock_irqsave(&qp->s_lock, tflags);
            *flags = tflags;
            rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
            goto done;
        }
    }

    qp->s_cur = next_cur;
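    /*
     * Pad the payload to a 4-byte boundary: e.g. a 9-byte payload
     * needs 3 pad bytes (-9 & 3 == 3) and occupies 3 32-bit words.
     */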
    extra_bytes = -wqe->length & 3;
    nwords = (wqe->length + extra_bytes) >> 2;

    /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
    qp->s_hdrwords = 7;
    qp->s_cur_size = wqe->length;
    qp->s_cur_sge = &qp->s_sge;
    qp->s_srate = rdma_ah_get_static_rate(ah_attr);
    qp->s_wqe = wqe;
    qp->s_sge.sge = wqe->sg_list[0];
    qp->s_sge.sg_list = wqe->sg_list + 1;
    qp->s_sge.num_sge = wqe->wr.num_sge;
    qp->s_sge.total_len = wqe->length;

    if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
        /* Header size in 32-bit words. */
        qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
                           rdma_ah_read_grh(ah_attr),
                           qp->s_hdrwords, nwords);
        lrh0 = QIB_LRH_GRH;
        ohdr = &priv->s_hdr->u.l.oth;
        /*
         * Don't worry about sending to locally attached multicast
         * QPs.  The spec leaves what happens in that case
         * unspecified.
         */
    } else {
        /* Header size in 32-bit words. */
        lrh0 = QIB_LRH_BTH;
        ohdr = &priv->s_hdr->u.oth;
    }
    if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
        qp->s_hdrwords++;
        ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
        bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
    } else
        bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
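    /*
     * LRH word 0: VL in bits 15:12 and SL in bits 7:4; the
     * link-next-header bits were chosen above via
     * QIB_LRH_GRH/QIB_LRH_BTH.
     */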
    lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
    if (qp->ibqp.qp_type == IB_QPT_SMI)
        lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
    else
        lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
    priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
    priv->s_hdr->lrh[1] =
            cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
    priv->s_hdr->lrh[2] =
            cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
    lid = ppd->lid;
    if (lid) {
        lid |= rdma_ah_get_path_bits(ah_attr) &
            ((1 << ppd->lmc) - 1);
        priv->s_hdr->lrh[3] = cpu_to_be16(lid);
    } else
        priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
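    /*
     * BTH word 0: opcode in bits 31:24, the solicited-event bit,
     * pad count in bits 21:20, and the pkey in bits 15:0.
     */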
    if (wqe->wr.send_flags & IB_SEND_SOLICITED)
        bth0 |= IB_BTH_SOLICITED;
    bth0 |= extra_bytes << 20;
    bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
        qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
                 rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
    ohdr->bth[0] = cpu_to_be32(bth0);
    /*
     * Use the multicast QP if the destination LID is a multicast LID.
     */
    ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
            be16_to_cpu(IB_MULTICAST_LID_BASE) &&
        rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
        cpu_to_be32(QIB_MULTICAST_QPN) :
        cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
    ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
    /*
     * Qkeys with the high order bit set mean use the
     * qkey from the QP context instead of the WR (see 10.2.5).
     */
    ohdr->u.ud.deth[0] =
        cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
                rvt_get_swqe_remote_qkey(wqe));
    ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
    return 1;
bail:
    qp->s_flags &= ~RVT_S_BUSY;
    return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
    struct qib_pportdata *ppd = ppd_from_ibp(ibp);
    struct qib_devdata *dd = ppd->dd;
    unsigned ctxt = ppd->hw_pidx;
    unsigned i;

    pkey &= 0x7fff; /* remove limited/full membership bit */
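    /* E.g. the full-membership default pkey 0xFFFF and its limited
     * counterpart 0x7FFF compare equal once the bit is masked off.
     */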

    for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
        if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
            return i;

    /*
     * Should not get here; this means the hardware failed to
     * validate pkeys.  Punt and return index 0.
     */
    return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
        int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
    struct ib_other_headers *ohdr;
    int opcode;
    u32 hdrsize;
    u32 pad;
    struct ib_wc wc;
    u32 qkey;
    u32 src_qp;
    u16 dlid;

    /* Check for GRH */
    if (!has_grh) {
        ohdr = &hdr->u.oth;
        hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
    } else {
        ohdr = &hdr->u.l.oth;
        hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
    }
    qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
    src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

    /*
     * Get the number of bytes the message was padded by
     * and drop incomplete packets.
     */
    pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
    if (unlikely(tlen < (hdrsize + pad + 4)))
        goto drop;

    tlen -= hdrsize + pad + 4;
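    /* The trailing 4 bytes stripped here are the ICRC, which is
     * not part of the payload.
     */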

    /*
     * Check that the permissive LID is only used on QP0
     * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
     */
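    /*
     * IB_LID_PERMISSIVE is 0xFFFF, which reads the same in either
     * byte order, so the LID comparisons below need no byte swap.
     */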
    if (qp->ibqp.qp_num) {
        if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                 hdr->lrh[3] == IB_LID_PERMISSIVE))
            goto drop;
        if (qp->ibqp.qp_num > 1) {
            u16 pkey1, pkey2;

            pkey1 = be32_to_cpu(ohdr->bth[0]);
            pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
            if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
                qib_bad_pkey(ibp,
                         pkey1,
                         (be16_to_cpu(hdr->lrh[0]) >> 4) &
                        0xF,
                         src_qp, qp->ibqp.qp_num,
                         hdr->lrh[3], hdr->lrh[1]);
                return;
            }
        }
        if (unlikely(qkey != qp->qkey))
            return;

        /* Drop invalid MAD packets (see 13.5.3.1). */
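        /* GSI MADs are exactly 256 bytes and must not arrive on
         * VL15 (lrh[0] bits 15:12 carry the VL).
         */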
        if (unlikely(qp->ibqp.qp_num == 1 &&
                 (tlen != 256 ||
                  (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
            goto drop;
    } else {
        struct ib_smp *smp;

        /* Drop invalid MAD packets (see 13.5.3.1). */
        if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
            goto drop;
        smp = (struct ib_smp *) data;
        if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
             hdr->lrh[3] == IB_LID_PERMISSIVE) &&
            smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
            goto drop;
    }

    /*
     * The opcode is in the low byte when it's in network order
     * (top byte when in host order).
     */
    opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
    if (qp->ibqp.qp_num > 1 &&
        opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
        wc.ex.imm_data = ohdr->u.ud.imm_data;
        wc.wc_flags = IB_WC_WITH_IMM;
    } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
        wc.ex.imm_data = 0;
        wc.wc_flags = 0;
    } else
        goto drop;

    /*
     * A GRH is expected to precede the data even if not
     * present on the wire.
     */
    wc.byte_len = tlen + sizeof(struct ib_grh);

    /*
     * Get the next work request entry to find where to put the data.
     */
    if (qp->r_flags & RVT_R_REUSE_SGE)
        qp->r_flags &= ~RVT_R_REUSE_SGE;
    else {
        int ret;

        ret = rvt_get_rwqe(qp, false);
        if (ret < 0) {
            rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
            return;
        }
        if (!ret) {
            if (qp->ibqp.qp_num == 0)
                ibp->rvp.n_vl15_dropped++;
            return;
        }
    }
    /* Silently drop packets which are too big. */
    if (unlikely(wc.byte_len > qp->r_len)) {
        qp->r_flags |= RVT_R_REUSE_SGE;
        goto drop;
    }
    if (has_grh) {
        rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
                 sizeof(struct ib_grh), true, false);
        wc.wc_flags |= IB_WC_GRH;
    } else
        rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
    rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
             true, false);
    rvt_put_ss(&qp->r_sge);
    if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
        return;
    wc.wr_id = qp->r_wr_id;
    wc.status = IB_WC_SUCCESS;
    wc.opcode = IB_WC_RECV;
    wc.vendor_err = 0;
    wc.qp = &qp->ibqp;
    wc.src_qp = src_qp;
    wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
        qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
    wc.slid = be16_to_cpu(hdr->lrh[3]);
    wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
    dlid = be16_to_cpu(hdr->lrh[1]);
    /*
     * Save the LMC lower bits if the destination LID is a unicast LID.
     */
    wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
        dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
    wc.port_num = qp->port_num;
    /* Signal completion event if the solicited bit is set. */
    rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
    return;

drop:
    ibp->rvp.n_pkt_drops++;
}