// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;

	if (unlikely(!qp->valid))
		goto err1;

	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	default:
		pr_warn_ratelimited("unsupported qp type\n");
		goto err1;
	}

	/* requests are handled by the responder, responses and acks by
	 * the completer, so check the state of the matching side
	 */
	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			goto err1;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
				qp->req.state > QP_STATE_DRAINED)) {
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}
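
/*
 * Illustrative sketch (not driver code): the BTH opcode encodes the
 * transport class in its top three bits, which is why masking with 0xe0
 * is enough to classify a packet. Assuming the standard opcode values
 * from <rdma/ib_pack.h>:
 *
 *	IB_OPCODE_RC_SEND_ONLY (0x04) & 0xe0 == 0x00 == IB_OPCODE_RC
 *	IB_OPCODE_UC_SEND_ONLY (0x24) & 0xe0 == 0x20 == IB_OPCODE_UC
 *	IB_OPCODE_UD_SEND_ONLY (0x64) & 0xe0 == 0x60 == IB_OPCODE_UD
 */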

static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
					port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}
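
/*
 * Background sketch (assumption, not from this file): bad_pkey_cntr and
 * qkey_viol_cntr mirror 16-bit PortInfo counters, so the min() in the two
 * helpers above makes the increment saturate at 0xffff instead of wrapping:
 *
 *	u32 cnt = 0xffff;
 *	cnt = min((u32)0xffff, cnt + 1);	// stays 0xffff, no wrap to 0
 */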
0077
0078 static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
0079 u32 qpn, struct rxe_qp *qp)
0080 {
0081 struct rxe_port *port = &rxe->port;
0082 u16 pkey = bth_pkey(pkt);
0083
0084 pkt->pkey_index = 0;
0085
0086 if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
0087 pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
0088 set_bad_pkey_cntr(port);
0089 goto err1;
0090 }
0091
0092 if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
0093 u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
0094
0095 if (unlikely(deth_qkey(pkt) != qkey)) {
0096 pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
0097 deth_qkey(pkt), qkey, qpn);
0098 set_qkey_viol_cntr(port);
0099 goto err1;
0100 }
0101 }
0102
0103 return 0;
0104
0105 err1:
0106 return -EINVAL;
0107 }
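
/*
 * Sketch of the partition key rule (assuming the usual IBA semantics, not
 * code from this driver): two pkeys match when their low 15 bits are equal
 * and non-zero, and at least one side has the full-membership bit (bit 15)
 * set. rxe only supports the default partition, so every packet is checked
 * against IB_DEFAULT_PKEY_FULL (0xffff):
 *
 *	static bool example_pkey_match(u16 key1, u16 key2)
 *	{
 *		return (key1 & 0x7fff) != 0 &&
 *		       (key1 & 0x7fff) == (key2 & 0x7fff) &&
 *		       ((key1 & 0x8000) || (key2 & 0x8000));
 *	}
 */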

/* for connected QPs check that the packet arrived on the QP's port and
 * that its IP addresses match the primary address vector
 */
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		goto done;

	if (unlikely(pkt->port_num != qp->attr.port_num)) {
		pr_warn_ratelimited("port %d != qp port %d\n",
				    pkt->port_num, qp->attr.port_num);
		goto err1;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if (ip_hdr(skb)->daddr != saddr->s_addr) {
			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
					    &ip_hdr(skb)->daddr,
					    &saddr->s_addr);
			goto err1;
		}

		if (ip_hdr(skb)->saddr != daddr->s_addr) {
			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
					    &ip_hdr(skb)->saddr,
					    &daddr->s_addr);
			goto err1;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
					    &ipv6_hdr(skb)->daddr, saddr);
			goto err1;
		}

		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
					    &ipv6_hdr(skb)->saddr, daddr);
			goto err1;
		}
	}

done:
	return 0;

err1:
	return -EINVAL;
}
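
/*
 * Orientation example (illustrative addresses only): the comparisons above
 * are deliberately crossed. An incoming packet travels toward this QP, so
 * its IP destination must equal the QP's own source address and its IP
 * source must equal the QP's destination address:
 *
 *	QP: sgid_addr = 192.168.1.1, dgid_addr = 192.168.1.2
 *	valid packet: saddr = 192.168.1.2, daddr = 192.168.1.1
 */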

/* sanity check the packet header and look up the destination QP,
 * taking a reference on it for a unicast packet
 */
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
		pr_warn_ratelimited("bad tver\n");
		goto err1;
	}

	if (unlikely(qpn == 0)) {
		pr_warn_once("QP 0 not supported");
		goto err1;
	}

	if (qpn != IB_MULTICAST_QPN) {
		/* QP1 (GSI) is looked up by the pool index the driver
		 * chose for it when the QP was created
		 */
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp)) {
			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
			goto err1;
		}

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
			pr_warn_ratelimited("no grh for mcast qpn\n");
			goto err1;
		}
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_put(qp);
err1:
	return -EINVAL;
}
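
/*
 * Reference-counting note (summary of the behavior above, not new code):
 * on success for a unicast packet, pkt->qp holds the reference taken by
 * rxe_pool_get_index(); the downstream consumer is responsible for dropping
 * it with rxe_put() once the packet has been processed. On any check
 * failure the reference is dropped at err2 before returning -EINVAL.
 */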

/* hand the packet to the responder (requests) or the completer
 * (responses and acks)
 */
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}
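
/*
 * Concrete case (standard IBA opcode semantics, for orientation only): an
 * incoming RC SEND carries RXE_REQ_MASK in rxe_opcode[].mask and is queued
 * to the responder, while an incoming RC ACKNOWLEDGE does not, so it goes
 * to the completer, which retires the matching send-queue WQE.
 */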

/* deliver an incoming packet on the multicast QPN to every QP
 * attached to the multicast group
 */
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_lookup_mcg(rxe, &dgid);
	if (!mcg)
		goto drop;	/* mcast group not registered */

	spin_lock_bh(&rxe->mcg_lock);

	/* this is an unreliable datagram service, so let a failure to
	 * deliver the packet to a single QP happen and just move on to
	 * the rest of the QPs on the list
	 */
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		qp = mca->qp;

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last QP create a new clone of the
		 * skb and pass to the QP. Pass the original skb to
		 * the last QP in the list.
		 */
		if (mca->qp_list.next != &mcg->qp_list) {
			struct sk_buff *cskb;
			struct rxe_pkt_info *cpkt;

			cskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!cskb))
				continue;

			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
				kfree_skb(cskb);
				break;
			}

			cpkt = SKB_TO_PKT(cskb);
			cpkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(cpkt, cskb);
		} else {
			pkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(pkt, skb);
			skb = NULL;	/* mark consumed */
		}
	}

	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	if (likely(!skb))
		return;

	/* we only reach here if one of the checks failed on the last
	 * QP in the list above, leaving the original skb unconsumed
	 */

drop:
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
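
/*
 * Sketch of the fan-out pattern above (generic skb idiom, not extra driver
 * code): skb_clone() duplicates only the sk_buff header and shares the
 * packet data, so delivering to N group members costs N - 1 small header
 * allocations rather than N full copies:
 *
 *	struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
 *	if (cskb)
 *		deliver(cskb);	// hypothetical consumer
 */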

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packets
 * Extract IP address from packet and
 *	Accept if multicast packet
 *	Accept if matches an SGID table entry
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}
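
/*
 * Example of the v4-mapped conversion used above (standard RoCEv2 GID
 * layout, shown with illustrative values): an IPv4 destination is embedded
 * in an IPv6 GID as ::ffff:a.b.c.d before the SGID table lookup:
 *
 *	192.168.1.1  ->  0000:0000:0000:0000:0000:ffff:c0a8:0101
 */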

/* rxe_rcv() is the entry point for all received packets, both those
 * arriving on the UDP tunnel socket and locally looped-back packets
 */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;

	if (unlikely(skb->len < RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0) {
		pr_warn_ratelimited("failed checking dgid\n");
		goto drop;
	}

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	err = rxe_icrc_check(skb, pkt);
	if (unlikely(err))
		goto drop;

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_put(pkt->qp);

	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
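
/*
 * Usage sketch (assumed caller wiring based on how rxe plugs into the UDP
 * tunnel infrastructure; the names below live outside this file): the
 * encap_rcv callback of the RoCEv2 UDP socket strips the outer headers,
 * fills in the rxe_pkt_info stored in skb->cb, and then hands the packet
 * to rxe_rcv():
 *
 *	static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		...			// set up SKB_TO_PKT(skb) fields
 *		rxe_rcv(skb);
 *		return 0;
 *	}
 */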