0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /* SCTP kernel implementation
0003  * Copyright (c) 1999-2000 Cisco, Inc.
0004  * Copyright (c) 1999-2001 Motorola, Inc.
0005  * Copyright (c) 2001-2003 International Business Machines, Corp.
0006  * Copyright (c) 2001 Intel Corp.
0007  * Copyright (c) 2001 Nokia, Inc.
0008  * Copyright (c) 2001 La Monte H.P. Yarroll
0009  *
0010  * This file is part of the SCTP kernel implementation
0011  *
0012  * These functions handle all input from the IP layer into SCTP.
0013  *
0014  * Please send any bug reports or fixes you make to the
0015  * email address(es):
0016  *    lksctp developers <linux-sctp@vger.kernel.org>
0017  *
0018  * Written or modified by:
0019  *    La Monte H.P. Yarroll <piggy@acm.org>
0020  *    Karl Knutson <karl@athena.chicago.il.us>
0021  *    Xingang Guo <xingang.guo@intel.com>
0022  *    Jon Grimm <jgrimm@us.ibm.com>
0023  *    Hui Huang <hui.huang@nokia.com>
0024  *    Daisy Chang <daisyc@us.ibm.com>
0025  *    Sridhar Samudrala <sri@us.ibm.com>
0026  *    Ardelle Fan <ardelle.fan@intel.com>
0027  */
0028 
0029 #include <linux/types.h>
0030 #include <linux/list.h> /* For struct list_head */
0031 #include <linux/socket.h>
0032 #include <linux/ip.h>
0033 #include <linux/time.h> /* For struct timeval */
0034 #include <linux/slab.h>
0035 #include <net/ip.h>
0036 #include <net/icmp.h>
0037 #include <net/snmp.h>
0038 #include <net/sock.h>
0039 #include <net/xfrm.h>
0040 #include <net/sctp/sctp.h>
0041 #include <net/sctp/sm.h>
0042 #include <net/sctp/checksum.h>
0043 #include <net/net_namespace.h>
0044 #include <linux/rhashtable.h>
0045 #include <net/sock_reuseport.h>
0046 
0047 /* Forward declarations for internal helpers. */
0048 static int sctp_rcv_ootb(struct sk_buff *);
0049 static struct sctp_association *__sctp_rcv_lookup(struct net *net,
0050                       struct sk_buff *skb,
0051                       const union sctp_addr *paddr,
0052                       const union sctp_addr *laddr,
0053                       struct sctp_transport **transportp);
0054 static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
0055                     struct net *net, struct sk_buff *skb,
0056                     const union sctp_addr *laddr,
0057                     const union sctp_addr *daddr);
0058 static struct sctp_association *__sctp_lookup_association(
0059                     struct net *net,
0060                     const union sctp_addr *local,
0061                     const union sctp_addr *peer,
0062                     struct sctp_transport **pt);
0063 
0064 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
0065 
0066 
0067 /* Verify the SCTP checksum of an SCTP packet. */
0068 static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
0069 {
0070     struct sctphdr *sh = sctp_hdr(skb);
0071     __le32 cmp = sh->checksum;
0072     __le32 val = sctp_compute_cksum(skb, 0);
0073 
0074     if (val != cmp) {
0075         /* CRC failure, dump it. */
0076         __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
0077         return -1;
0078     }
0079     return 0;
0080 }
0081 
0082 /*
0083  * This is the routine which IP calls when receiving an SCTP packet.
0084  */
0085 int sctp_rcv(struct sk_buff *skb)
0086 {
0087     struct sock *sk;
0088     struct sctp_association *asoc;
0089     struct sctp_endpoint *ep = NULL;
0090     struct sctp_ep_common *rcvr;
0091     struct sctp_transport *transport = NULL;
0092     struct sctp_chunk *chunk;
0093     union sctp_addr src;
0094     union sctp_addr dest;
0095     int bound_dev_if;
0096     int family;
0097     struct sctp_af *af;
0098     struct net *net = dev_net(skb->dev);
0099     bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
0100 
0101     if (skb->pkt_type != PACKET_HOST)
0102         goto discard_it;
0103 
0104     __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
0105 
0106     /* If packet is too small to contain a single chunk, let's not
0107      * waste time on it anymore.
0108      */
0109     if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
0110                skb_transport_offset(skb))
0111         goto discard_it;
0112 
0113     /* If the packet is fragmented and we need to do CRC checking,
0114      * it's better to just linearize it; otherwise the CRC
0115      * computation takes longer.
0116      */
0117     if ((!is_gso && skb_linearize(skb)) ||
0118         !pskb_may_pull(skb, sizeof(struct sctphdr)))
0119         goto discard_it;
0120 
0121     /* Pull up the IP header. */
0122     __skb_pull(skb, skb_transport_offset(skb));
0123 
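    /* If a lower layer already verified the CRC32c (CHECKSUM_UNNECESSARY),
     * consume one level of that indication instead of recomputing the
     * checksum in software; GSO aggregates likewise skip the software
     * check here.
     */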
0124     skb->csum_valid = 0; /* Previous value not applicable */
0125     if (skb_csum_unnecessary(skb))
0126         __skb_decr_checksum_unnecessary(skb);
0127     else if (!sctp_checksum_disable &&
0128          !is_gso &&
0129          sctp_rcv_checksum(net, skb) < 0)
0130         goto discard_it;
0131     skb->csum_valid = 1;
0132 
0133     __skb_pull(skb, sizeof(struct sctphdr));
0134 
0135     family = ipver2af(ip_hdr(skb)->version);
0136     af = sctp_get_af_specific(family);
0137     if (unlikely(!af))
0138         goto discard_it;
0139     SCTP_INPUT_CB(skb)->af = af;
0140 
0141     /* Initialize the source and destination addresses for lookups. */
0142     af->from_skb(&src, skb, 1);
0143     af->from_skb(&dest, skb, 0);
0144 
0145     /* If the packet is to or from a non-unicast address,
0146      * silently discard the packet.
0147      *
0148      * This is not clearly defined in the RFC except in section
0149      * 8.4 - OOTB handling.  However, based on the book "Stream Control
0150      * Transmission Protocol" 2.1, "It is important to note that the
0151      * IP address of an SCTP transport address must be a routable
0152      * unicast address.  In other words, IP multicast addresses and
0153      * IP broadcast addresses cannot be used in an SCTP transport
0154      * address."
0155      */
0156     if (!af->addr_valid(&src, NULL, skb) ||
0157         !af->addr_valid(&dest, NULL, skb))
0158         goto discard_it;
0159 
0160     asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
0161 
0162     if (!asoc)
0163         ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
0164 
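    /* Note: __sctp_rcv_lookup_endpoint() never returns NULL; when no
     * endpoint matches it falls back to the control socket's endpoint,
     * so rcvr below is always valid.
     */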
0165     /* Retrieve the common input handling substructure. */
0166     rcvr = asoc ? &asoc->base : &ep->base;
0167     sk = rcvr->sk;
0168 
0169     /*
0170      * If a frame arrives on an interface and the receiving socket is
0171      * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
0172      */
0173     bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
0174     if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
0175         if (transport) {
0176             sctp_transport_put(transport);
0177             asoc = NULL;
0178             transport = NULL;
0179         } else {
0180             sctp_endpoint_put(ep);
0181             ep = NULL;
0182         }
0183         sk = net->sctp.ctl_sock;
0184         ep = sctp_sk(sk)->ep;
0185         sctp_endpoint_hold(ep);
0186         rcvr = &ep->base;
0187     }
0188 
0189     /*
0190      * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
0191      * An SCTP packet is called an "out of the blue" (OOTB)
0192      * packet if it is correctly formed, i.e., passed the
0193      * receiver's checksum check, but the receiver is not
0194      * able to identify the association to which this
0195      * packet belongs.
0196      */
0197     if (!asoc) {
0198         if (sctp_rcv_ootb(skb)) {
0199             __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
0200             goto discard_release;
0201         }
0202     }
0203 
0204     if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
0205         goto discard_release;
0206     nf_reset_ct(skb);
0207 
0208     if (sk_filter(sk, skb))
0209         goto discard_release;
0210 
0211     /* Create an SCTP packet structure. */
0212     chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
0213     if (!chunk)
0214         goto discard_release;
0215     SCTP_INPUT_CB(skb)->chunk = chunk;
0216 
0217     /* Remember what endpoint is to handle this packet. */
0218     chunk->rcvr = rcvr;
0219 
0220     /* Remember the SCTP header. */
0221     chunk->sctp_hdr = sctp_hdr(skb);
0222 
0223     /* Set the source and destination addresses of the incoming chunk.  */
0224     sctp_init_addrs(chunk, &src, &dest);
0225 
0226     /* Remember where we came from.  */
0227     chunk->transport = transport;
0228 
0229     /* Acquire access to the sock lock. Note: We are safe from other
0230      * bottom halves on this lock, but a user may be in the lock too,
0231      * so check if it is busy.
0232      */
0233     bh_lock_sock(sk);
0234 
0235     if (sk != rcvr->sk) {
0236         /* Our cached sk is different from the rcvr->sk.  This is
0237          * because migrate()/accept() may have moved the association
0238          * to a new socket and released all the sockets.  So now we
0239          * are holding a lock on the old socket while the user may
0240          * be doing something with the new socket.  Switch our view
0241          * of the current sk.
0242          */
0243         bh_unlock_sock(sk);
0244         sk = rcvr->sk;
0245         bh_lock_sock(sk);
0246     }
0247 
0248     if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
0249         if (sctp_add_backlog(sk, skb)) {
0250             bh_unlock_sock(sk);
0251             sctp_chunk_free(chunk);
0252             skb = NULL; /* sctp_chunk_free already freed the skb */
0253             goto discard_release;
0254         }
0255         __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
0256     } else {
0257         __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
0258         sctp_inq_push(&chunk->rcvr->inqueue, chunk);
0259     }
0260 
0261     bh_unlock_sock(sk);
0262 
0263     /* Release the asoc/ep ref we took in the lookup calls. */
0264     if (transport)
0265         sctp_transport_put(transport);
0266     else
0267         sctp_endpoint_put(ep);
0268 
0269     return 0;
0270 
0271 discard_it:
0272     __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
0273     kfree_skb(skb);
0274     return 0;
0275 
0276 discard_release:
0277     /* Release the asoc/ep ref we took in the lookup calls. */
0278     if (transport)
0279         sctp_transport_put(transport);
0280     else
0281         sctp_endpoint_put(ep);
0282 
0283     goto discard_it;
0284 }
0285 
0286 /* Process the backlog queue of the socket.  Every skb on
0287  * the backlog holds a ref on an association or endpoint.
0288  * We hold this ref throughout the state machine to make
0289  * sure that the structure we need is still around.
0290  */
0291 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
0292 {
0293     struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
0294     struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
0295     struct sctp_transport *t = chunk->transport;
0296     struct sctp_ep_common *rcvr = NULL;
0297     int backloged = 0;
0298 
0299     rcvr = chunk->rcvr;
0300 
0301     /* If the rcvr is dead then the association or endpoint
0302      * has been deleted and we can safely drop the chunk
0303      * and refs that we are holding.
0304      */
0305     if (rcvr->dead) {
0306         sctp_chunk_free(chunk);
0307         goto done;
0308     }
0309 
0310     if (unlikely(rcvr->sk != sk)) {
0311         /* In this case, the association moved from one socket to
0312          * another.  We are currently sitting on the backlog of the
0313          * old socket, so we need to move.
0314          * However, since we are here in the process context we
0315          * need to make sure that the user doesn't own
0316          * the new socket when we process the packet.
0317          * If the new socket is user-owned, queue the chunk to the
0318          * backlog of the new socket without dropping any refs.
0319          * Otherwise, we can safely push the chunk on the inqueue.
0320          */
0321 
0322         sk = rcvr->sk;
0323         local_bh_disable();
0324         bh_lock_sock(sk);
0325 
0326         if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
0327             if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
0328                 sctp_chunk_free(chunk);
0329             else
0330                 backloged = 1;
0331         } else
0332             sctp_inq_push(inqueue, chunk);
0333 
0334         bh_unlock_sock(sk);
0335         local_bh_enable();
0336 
0337         /* If the chunk was backlogged again, don't drop refs */
0338         if (backloged)
0339             return 0;
0340     } else {
0341         if (!sctp_newsk_ready(sk)) {
0342             if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
0343                 return 0;
0344             sctp_chunk_free(chunk);
0345         } else {
0346             sctp_inq_push(inqueue, chunk);
0347         }
0348     }
0349 
0350 done:
0351     /* Release the refs we took in sctp_add_backlog */
0352     if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
0353         sctp_transport_put(t);
0354     else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
0355         sctp_endpoint_put(sctp_ep(rcvr));
0356     else
0357         BUG();
0358 
0359     return 0;
0360 }
0361 
0362 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
0363 {
0364     struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
0365     struct sctp_transport *t = chunk->transport;
0366     struct sctp_ep_common *rcvr = chunk->rcvr;
0367     int ret;
0368 
0369     ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
0370     if (!ret) {
0371         /* Hold the assoc/ep while hanging on the backlog queue.
0372          * This way, we know structures we need will not disappear
0373          * from us
0374          */
0375         if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
0376             sctp_transport_hold(t);
0377         else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
0378             sctp_endpoint_hold(sctp_ep(rcvr));
0379         else
0380             BUG();
0381     }
0382     return ret;
0383 
0384 }
0385 
0386 /* Handle icmp frag needed error. */
0387 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
0388                struct sctp_transport *t, __u32 pmtu)
0389 {
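    /* Only act if the reported MTU is below the transport's current
     * path MTU or below its PLPMTUD probe size plus headers.
     */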
0390     if (!t ||
0391         (t->pathmtu <= pmtu &&
0392          t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
0393         return;
0394 
0395     if (sock_owned_by_user(sk)) {
0396         atomic_set(&t->mtu_info, pmtu);
0397         asoc->pmtu_pending = 1;
0398         t->pmtu_pending = 1;
0399         return;
0400     }
0401 
0402     if (!(t->param_flags & SPP_PMTUD_ENABLE))
0403         /* We can't allow retransmitting in such a case, as the
0404          * retransmission would be sized just as before, and thus we
0405          * would get another ICMP and retransmit again.
0406          */
0407         return;
0408 
0409     /* Update the transport's view of the MTU. Return if no update was needed.
0410      * If an update wasn't needed/possible, it also doesn't make sense to
0411      * try to retransmit now.
0412      */
0413     if (!sctp_transport_update_pmtu(t, pmtu))
0414         return;
0415 
0416     /* Update association pmtu. */
0417     sctp_assoc_sync_pmtu(asoc);
0418 
0419     /* Retransmit with the new pmtu setting. */
0420     sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
0421 }
0422 
0423 void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
0424             struct sk_buff *skb)
0425 {
0426     struct dst_entry *dst;
0427 
0428     if (sock_owned_by_user(sk) || !t)
0429         return;
0430     dst = sctp_transport_dst_check(t);
0431     if (dst)
0432         dst->ops->redirect(dst, sk, skb);
0433 }
0434 
0435 /*
0436  * SCTP Implementer's Guide, 2.37 ICMP handling procedures
0437  *
0438  * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
0439  *        or a "Protocol Unreachable", treat this message as an abort
0440  *        with the T bit set.
0441  *
0442  * This function sends an event to the state machine, which will abort the
0443  * association.
0444  *
0445  */
0446 void sctp_icmp_proto_unreachable(struct sock *sk,
0447                struct sctp_association *asoc,
0448                struct sctp_transport *t)
0449 {
0450     if (sock_owned_by_user(sk)) {
0451         if (timer_pending(&t->proto_unreach_timer))
0452             return;
0453         else {
0454             if (!mod_timer(&t->proto_unreach_timer,
0455                         jiffies + (HZ/20)))
0456                 sctp_transport_hold(t);
0457         }
0458     } else {
0459         struct net *net = sock_net(sk);
0460 
0461         pr_debug("%s: unrecognized next header type "
0462              "encountered!\n", __func__);
0463 
0464         if (del_timer(&t->proto_unreach_timer))
0465             sctp_transport_put(t);
0466 
0467         sctp_do_sm(net, SCTP_EVENT_T_OTHER,
0468                SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
0469                asoc->state, asoc->ep, asoc, t,
0470                GFP_ATOMIC);
0471     }
0472 }
0473 
0474 /* Common lookup code for icmp/icmpv6 error handler. */
0475 struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
0476                  struct sctphdr *sctphdr,
0477                  struct sctp_association **app,
0478                  struct sctp_transport **tpp)
0479 {
0480     struct sctp_init_chunk *chunkhdr, _chunkhdr;
0481     union sctp_addr saddr;
0482     union sctp_addr daddr;
0483     struct sctp_af *af;
0484     struct sock *sk = NULL;
0485     struct sctp_association *asoc;
0486     struct sctp_transport *transport = NULL;
0487     __u32 vtag = ntohl(sctphdr->vtag);
0488 
0489     *app = NULL; *tpp = NULL;
0490 
0491     af = sctp_get_af_specific(family);
0492     if (unlikely(!af)) {
0493         return NULL;
0494     }
0495 
0496     /* Initialize the source and destination addresses for lookups. */
0497     af->from_skb(&saddr, skb, 1);
0498     af->from_skb(&daddr, skb, 0);
0499 
0500     /* Look for an association that matches the incoming ICMP error
0501      * packet.
0502      */
0503     asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
0504     if (!asoc)
0505         return NULL;
0506 
0507     sk = asoc->base.sk;
0508 
0509     /* RFC 4960, Appendix C. ICMP Handling
0510      *
0511      * ICMP6) An implementation MUST validate that the Verification Tag
0512      * contained in the ICMP message matches the Verification Tag of
0513      * the peer.  If the Verification Tag is not 0 and does NOT
0514      * match, discard the ICMP message.  If it is 0 and the ICMP
0515      * message contains enough bytes to verify that the chunk type is
0516      * an INIT chunk and that the Initiate Tag matches the tag of the
0517      * peer, continue with ICMP7.  If the ICMP message is too short
0518      * or the chunk type or the Initiate Tag does not match, silently
0519      * discard the packet.
0520      */
0521     if (vtag == 0) {
0522         /* chunk header + first 4 octets of init header */
0523         chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
0524                           sizeof(struct sctphdr),
0525                           sizeof(struct sctp_chunkhdr) +
0526                           sizeof(__be32), &_chunkhdr);
0527         if (!chunkhdr ||
0528             chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
0529             ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
0530             goto out;
0531 
0532     } else if (vtag != asoc->c.peer_vtag) {
0533         goto out;
0534     }
0535 
0536     bh_lock_sock(sk);
0537 
0538     /* If too many ICMPs get dropped on busy
0539      * servers this needs to be solved differently.
0540      */
0541     if (sock_owned_by_user(sk))
0542         __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
0543 
0544     *app = asoc;
0545     *tpp = transport;
0546     return sk;
0547 
0548 out:
0549     sctp_transport_put(transport);
0550     return NULL;
0551 }
0552 
0553 /* Common cleanup code for icmp/icmpv6 error handler. */
0554 void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
0555     __releases(&((__sk)->sk_lock.slock))
0556 {
0557     bh_unlock_sock(sk);
0558     sctp_transport_put(t);
0559 }
0560 
0561 static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
0562                    __u8 type, __u8 code, __u32 info)
0563 {
0564     struct sctp_association *asoc = t->asoc;
0565     struct sock *sk = asoc->base.sk;
0566     int err = 0;
0567 
0568     switch (type) {
0569     case ICMP_PARAMETERPROB:
0570         err = EPROTO;
0571         break;
0572     case ICMP_DEST_UNREACH:
0573         if (code > NR_ICMP_UNREACH)
0574             return;
0575         if (code == ICMP_FRAG_NEEDED) {
0576             sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
0577             return;
0578         }
0579         if (code == ICMP_PROT_UNREACH) {
0580             sctp_icmp_proto_unreachable(sk, asoc, t);
0581             return;
0582         }
0583         err = icmp_err_convert[code].errno;
0584         break;
0585     case ICMP_TIME_EXCEEDED:
0586         if (code == ICMP_EXC_FRAGTIME)
0587             return;
0588 
0589         err = EHOSTUNREACH;
0590         break;
0591     case ICMP_REDIRECT:
0592         sctp_icmp_redirect(sk, t, skb);
0593         return;
0594     default:
0595         return;
0596     }
0597     if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
0598         sk->sk_err = err;
0599         sk_error_report(sk);
0600     } else {  /* Only an error on timeout */
0601         sk->sk_err_soft = err;
0602     }
0603 }
0604 
0605 /*
0606  * This routine is called by the ICMP module when it gets some
0607  * sort of error condition.  If err < 0 then the socket should
0608  * be closed and the error returned to the user.  If err > 0
0609  * it's just the icmp type << 8 | icmp code.  After adjustment
0610  * header points to the first 8 bytes of the sctp header.  We need
0611  * to find the appropriate port.
0612  *
0613  * The locking strategy used here is very "optimistic". When
0614  * someone else accesses the socket the ICMP is just dropped
0615  * and for some paths there is no check at all.
0616  * A more general error queue to queue errors for later handling
0617  * is probably better.
0618  *
0619  */
0620 int sctp_v4_err(struct sk_buff *skb, __u32 info)
0621 {
0622     const struct iphdr *iph = (const struct iphdr *)skb->data;
0623     const int type = icmp_hdr(skb)->type;
0624     const int code = icmp_hdr(skb)->code;
0625     struct net *net = dev_net(skb->dev);
0626     struct sctp_transport *transport;
0627     struct sctp_association *asoc;
0628     __u16 saveip, savesctp;
0629     struct sock *sk;
0630 
0631     /* Fix up skb to look at the embedded net header. */
0632     saveip = skb->network_header;
0633     savesctp = skb->transport_header;
0634     skb_reset_network_header(skb);
0635     skb_set_transport_header(skb, iph->ihl * 4);
0636     sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
0637     /* Put back the original values. */
0638     skb->network_header = saveip;
0639     skb->transport_header = savesctp;
0640     if (!sk) {
0641         __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
0642         return -ENOENT;
0643     }
0644 
0645     sctp_v4_err_handle(transport, skb, type, code, info);
0646     sctp_err_finish(sk, transport);
0647 
0648     return 0;
0649 }
0650 
0651 int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
0652 {
0653     struct net *net = dev_net(skb->dev);
0654     struct sctp_association *asoc;
0655     struct sctp_transport *t;
0656     struct icmphdr *hdr;
0657     __u32 info = 0;
0658 
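    /* For UDP-encapsulated SCTP the SCTP header follows the UDP header,
     * so point the transport header past the UDP header for the lookup
     * and restore it afterwards.
     */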
0659     skb->transport_header += sizeof(struct udphdr);
0660     sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
0661     if (!sk) {
0662         __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
0663         return -ENOENT;
0664     }
0665 
0666     skb->transport_header -= sizeof(struct udphdr);
0667     hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
0668     if (hdr->type == ICMP_REDIRECT) {
0669         /* can't be handled without outer iphdr known, leave it to udp_err */
0670         sctp_err_finish(sk, t);
0671         return 0;
0672     }
0673     if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
0674         info = ntohs(hdr->un.frag.mtu);
0675     sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);
0676 
0677     sctp_err_finish(sk, t);
0678     return 1;
0679 }
0680 
0681 /*
0682  * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
0683  *
0684  * This function scans all the chunks in the OOTB packet to determine if
0685  * the packet should be discarded right away.  If a response might be needed
0686  * for this packet, or, if further processing is possible, the packet will
0687  * be queued to a proper inqueue for the next phase of handling.
0688  *
0689  * Output:
0690  * Return 0 - If further processing is needed.
0691  * Return 1 - If the packet can be discarded right away.
0692  */
0693 static int sctp_rcv_ootb(struct sk_buff *skb)
0694 {
0695     struct sctp_chunkhdr *ch, _ch;
0696     int ch_end, offset = 0;
0697 
0698     /* Scan through all the chunks in the packet.  */
0699     do {
0700         /* Make sure we have at least the header there */
0701         if (offset + sizeof(_ch) > skb->len)
0702             break;
0703 
0704         ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
0705 
0706         /* Break out if the chunk length is less than the minimum. */
0707         if (!ch || ntohs(ch->length) < sizeof(_ch))
0708             break;
0709 
0710         ch_end = offset + SCTP_PAD4(ntohs(ch->length));
0711         if (ch_end > skb->len)
0712             break;
0713 
0714         /* RFC 2960 8.4, 2) If the OOTB packet contains an ABORT chunk, the
0715          * receiver MUST silently discard the OOTB packet and take no
0716          * further action.
0717          */
0718         if (SCTP_CID_ABORT == ch->type)
0719             goto discard;
0720 
0721         /* RFC 2960 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
0722          * chunk, the receiver should silently discard the packet
0723          * and take no further action.
0724          */
0725         if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
0726             goto discard;
0727 
0728         /* RFC 4460, 2.11.2
0729          * This will discard packets with INIT chunk bundled as
0730          * subsequent chunks in the packet.  When INIT is first,
0731          * the normal INIT processing will discard the chunk.
0732          */
0733         if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
0734             goto discard;
0735 
0736         offset = ch_end;
0737     } while (ch_end < skb->len);
0738 
0739     return 0;
0740 
0741 discard:
0742     return 1;
0743 }
0744 
0745 /* Insert endpoint into the hash table.  */
0746 static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
0747 {
0748     struct sock *sk = ep->base.sk;
0749     struct net *net = sock_net(sk);
0750     struct sctp_hashbucket *head;
0751 
0752     ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
0753     head = &sctp_ep_hashtable[ep->hashent];
0754 
0755     if (sk->sk_reuseport) {
0756         bool any = sctp_is_ep_boundall(sk);
0757         struct sctp_endpoint *ep2;
0758         struct list_head *list;
0759         int cnt = 0, err = 1;
0760 
0761         list_for_each(list, &ep->base.bind_addr.address_list)
0762             cnt++;
0763 
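        /* Walk the hash chain: err == 0 means a socket with a matching
         * bind address list was found and we joined its reuseport group;
         * err < 0 is a hard conflict; if err is still positive after the
         * loop, no match was found and a new reuseport group is allocated.
         */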
0764         sctp_for_each_hentry(ep2, &head->chain) {
0765             struct sock *sk2 = ep2->base.sk;
0766 
0767             if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
0768                 !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
0769                 !sk2->sk_reuseport)
0770                 continue;
0771 
0772             err = sctp_bind_addrs_check(sctp_sk(sk2),
0773                             sctp_sk(sk), cnt);
0774             if (!err) {
0775                 err = reuseport_add_sock(sk, sk2, any);
0776                 if (err)
0777                     return err;
0778                 break;
0779             } else if (err < 0) {
0780                 return err;
0781             }
0782         }
0783 
0784         if (err) {
0785             err = reuseport_alloc(sk, any);
0786             if (err)
0787                 return err;
0788         }
0789     }
0790 
0791     write_lock(&head->lock);
0792     hlist_add_head(&ep->node, &head->chain);
0793     write_unlock(&head->lock);
0794     return 0;
0795 }
0796 
0797 /* Add an endpoint to the hash. Local BH-safe. */
0798 int sctp_hash_endpoint(struct sctp_endpoint *ep)
0799 {
0800     int err;
0801 
0802     local_bh_disable();
0803     err = __sctp_hash_endpoint(ep);
0804     local_bh_enable();
0805 
0806     return err;
0807 }
0808 
0809 /* Remove endpoint from the hash table.  */
0810 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
0811 {
0812     struct sock *sk = ep->base.sk;
0813     struct sctp_hashbucket *head;
0814 
0815     ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port);
0816 
0817     head = &sctp_ep_hashtable[ep->hashent];
0818 
0819     if (rcu_access_pointer(sk->sk_reuseport_cb))
0820         reuseport_detach_sock(sk);
0821 
0822     write_lock(&head->lock);
0823     hlist_del_init(&ep->node);
0824     write_unlock(&head->lock);
0825 }
0826 
0827 /* Remove endpoint from the hash.  Local BH-safe. */
0828 void sctp_unhash_endpoint(struct sctp_endpoint *ep)
0829 {
0830     local_bh_disable();
0831     __sctp_unhash_endpoint(ep);
0832     local_bh_enable();
0833 }
0834 
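/* Hash the (peer address, peer port, local port) tuple, mixed with the
 * netns hash, into a 32-bit value.  Used both as the transport
 * rhashtable hash and for reuseport socket selection.
 */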
0835 static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
0836                 const union sctp_addr *paddr, __u32 seed)
0837 {
0838     __u32 addr;
0839 
0840     if (paddr->sa.sa_family == AF_INET6)
0841         addr = jhash(&paddr->v6.sin6_addr, 16, seed);
0842     else
0843         addr = (__force __u32)paddr->v4.sin_addr.s_addr;
0844 
0845     return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
0846                  (__force __u32)lport, net_hash_mix(net), seed);
0847 }
0848 
0849 /* Look up an endpoint. */
0850 static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
0851                     struct net *net, struct sk_buff *skb,
0852                     const union sctp_addr *laddr,
0853                     const union sctp_addr *paddr)
0854 {
0855     struct sctp_hashbucket *head;
0856     struct sctp_endpoint *ep;
0857     struct sock *sk;
0858     __be16 lport;
0859     int hash;
0860 
0861     lport = laddr->v4.sin_port;
0862     hash = sctp_ep_hashfn(net, ntohs(lport));
0863     head = &sctp_ep_hashtable[hash];
0864     read_lock(&head->lock);
0865     sctp_for_each_hentry(ep, &head->chain) {
0866         if (sctp_endpoint_is_match(ep, net, laddr))
0867             goto hit;
0868     }
0869 
0870     ep = sctp_sk(net->sctp.ctl_sock)->ep;
0871 
0872 hit:
0873     sk = ep->base.sk;
0874     if (sk->sk_reuseport) {
0875         __u32 phash = sctp_hashfn(net, lport, paddr, 0);
0876 
0877         sk = reuseport_select_sock(sk, phash, skb,
0878                        sizeof(struct sctphdr));
0879         if (sk)
0880             ep = sctp_sk(sk)->ep;
0881     }
0882     sctp_endpoint_hold(ep);
0883     read_unlock(&head->lock);
0884     return ep;
0885 }
0886 
0887 /* rhashtable for transport */
0888 struct sctp_hash_cmp_arg {
0889     const union sctp_addr   *paddr;
0890     const struct net    *net;
0891     __be16          lport;
0892 };
0893 
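/* rhashtable obj_cmpfn: returns 0 on a match and nonzero otherwise.
 * A temporary hold on the transport keeps t->asoc valid while the
 * association's net and port are compared.
 */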
0894 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
0895                 const void *ptr)
0896 {
0897     struct sctp_transport *t = (struct sctp_transport *)ptr;
0898     const struct sctp_hash_cmp_arg *x = arg->key;
0899     int err = 1;
0900 
0901     if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
0902         return err;
0903     if (!sctp_transport_hold(t))
0904         return err;
0905 
0906     if (!net_eq(t->asoc->base.net, x->net))
0907         goto out;
0908     if (x->lport != htons(t->asoc->base.bind_addr.port))
0909         goto out;
0910 
0911     err = 0;
0912 out:
0913     sctp_transport_put(t);
0914     return err;
0915 }
0916 
0917 static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
0918 {
0919     const struct sctp_transport *t = data;
0920 
0921     return sctp_hashfn(t->asoc->base.net,
0922                htons(t->asoc->base.bind_addr.port),
0923                &t->ipaddr, seed);
0924 }
0925 
0926 static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
0927 {
0928     const struct sctp_hash_cmp_arg *x = data;
0929 
0930     return sctp_hashfn(x->net, x->lport, x->paddr, seed);
0931 }
0932 
0933 static const struct rhashtable_params sctp_hash_params = {
0934     .head_offset        = offsetof(struct sctp_transport, node),
0935     .hashfn         = sctp_hash_key,
0936     .obj_hashfn     = sctp_hash_obj,
0937     .obj_cmpfn      = sctp_hash_cmp,
0938     .automatic_shrinking    = true,
0939 };
0940 
0941 int sctp_transport_hashtable_init(void)
0942 {
0943     return rhltable_init(&sctp_transport_hashtable, &sctp_hash_params);
0944 }
0945 
0946 void sctp_transport_hashtable_destroy(void)
0947 {
0948     rhltable_destroy(&sctp_transport_hashtable);
0949 }
0950 
0951 int sctp_hash_transport(struct sctp_transport *t)
0952 {
0953     struct sctp_transport *transport;
0954     struct rhlist_head *tmp, *list;
0955     struct sctp_hash_cmp_arg arg;
0956     int err;
0957 
0958     if (t->asoc->temp)
0959         return 0;
0960 
0961     arg.net   = t->asoc->base.net;
0962     arg.paddr = &t->ipaddr;
0963     arg.lport = htons(t->asoc->base.bind_addr.port);
0964 
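    /* Refuse the insert if this endpoint already has a transport hashed
     * under the same (net, peer address, local port) key.
     */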
0965     rcu_read_lock();
0966     list = rhltable_lookup(&sctp_transport_hashtable, &arg,
0967                    sctp_hash_params);
0968 
0969     rhl_for_each_entry_rcu(transport, tmp, list, node)
0970         if (transport->asoc->ep == t->asoc->ep) {
0971             rcu_read_unlock();
0972             return -EEXIST;
0973         }
0974     rcu_read_unlock();
0975 
0976     err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
0977                   &t->node, sctp_hash_params);
0978     if (err)
0979         pr_err_once("insert transport fail, errno %d\n", err);
0980 
0981     return err;
0982 }
0983 
0984 void sctp_unhash_transport(struct sctp_transport *t)
0985 {
0986     if (t->asoc->temp)
0987         return;
0988 
0989     rhltable_remove(&sctp_transport_hashtable, &t->node,
0990             sctp_hash_params);
0991 }
0992 
0993 /* Return a transport, taking a reference on it. */
0994 struct sctp_transport *sctp_addrs_lookup_transport(
0995                 struct net *net,
0996                 const union sctp_addr *laddr,
0997                 const union sctp_addr *paddr)
0998 {
0999     struct rhlist_head *tmp, *list;
1000     struct sctp_transport *t;
1001     struct sctp_hash_cmp_arg arg = {
1002         .paddr = paddr,
1003         .net   = net,
1004         .lport = laddr->v4.sin_port,
1005     };
1006 
1007     list = rhltable_lookup(&sctp_transport_hashtable, &arg,
1008                    sctp_hash_params);
1009 
1010     rhl_for_each_entry_rcu(t, tmp, list, node) {
1011         if (!sctp_transport_hold(t))
1012             continue;
1013 
1014         if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
1015                      laddr, sctp_sk(t->asoc->base.sk)))
1016             return t;
1017         sctp_transport_put(t);
1018     }
1019 
1020     return NULL;
1021 }
1022 
1023 /* Return a transport without taking a reference, as it's only used under the sock lock. */
1024 struct sctp_transport *sctp_epaddr_lookup_transport(
1025                 const struct sctp_endpoint *ep,
1026                 const union sctp_addr *paddr)
1027 {
1028     struct rhlist_head *tmp, *list;
1029     struct sctp_transport *t;
1030     struct sctp_hash_cmp_arg arg = {
1031         .paddr = paddr,
1032         .net   = ep->base.net,
1033         .lport = htons(ep->base.bind_addr.port),
1034     };
1035 
1036     list = rhltable_lookup(&sctp_transport_hashtable, &arg,
1037                    sctp_hash_params);
1038 
1039     rhl_for_each_entry_rcu(t, tmp, list, node)
1040         if (ep == t->asoc->ep)
1041             return t;
1042 
1043     return NULL;
1044 }
1045 
1046 /* Look up an association. */
1047 static struct sctp_association *__sctp_lookup_association(
1048                     struct net *net,
1049                     const union sctp_addr *local,
1050                     const union sctp_addr *peer,
1051                     struct sctp_transport **pt)
1052 {
1053     struct sctp_transport *t;
1054     struct sctp_association *asoc = NULL;
1055 
1056     t = sctp_addrs_lookup_transport(net, local, peer);
1057     if (!t)
1058         goto out;
1059 
1060     asoc = t->asoc;
1061     *pt = t;
1062 
1063 out:
1064     return asoc;
1065 }
1066 
1067 /* Look up an association, protected by the RCU read lock. */
1068 static
1069 struct sctp_association *sctp_lookup_association(struct net *net,
1070                          const union sctp_addr *laddr,
1071                          const union sctp_addr *paddr,
1072                          struct sctp_transport **transportp)
1073 {
1074     struct sctp_association *asoc;
1075 
1076     rcu_read_lock();
1077     asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1078     rcu_read_unlock();
1079 
1080     return asoc;
1081 }
1082 
1083 /* Is there an association matching the given local and peer addresses? */
1084 bool sctp_has_association(struct net *net,
1085               const union sctp_addr *laddr,
1086               const union sctp_addr *paddr)
1087 {
1088     struct sctp_transport *transport;
1089 
1090     if (sctp_lookup_association(net, laddr, paddr, &transport)) {
1091         sctp_transport_put(transport);
1092         return true;
1093     }
1094 
1095     return false;
1096 }
1097 
1098 /*
1099  * SCTP Implementer's Guide, 2.18 Handling of address
1100  * parameters within the INIT or INIT-ACK.
1101  *
1102  * D) When searching for a matching TCB upon reception of an INIT
1103  *    or INIT-ACK chunk the receiver SHOULD use not only the
1104  *    source address of the packet (containing the INIT or
1105  *    INIT-ACK) but the receiver SHOULD also use all valid
1106  *    address parameters contained within the chunk.
1107  *
1108  * 2.18.3 Solution description
1109  *
1110  * This new text clearly specifies to an implementor the need
1111  * to look within the INIT or INIT-ACK. Any implementation that
1112  * does not do this, may not be able to establish associations
1113  * in certain circumstances.
1114  *
1115  */
1116 static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1117     struct sk_buff *skb,
1118     const union sctp_addr *laddr, struct sctp_transport **transportp)
1119 {
1120     struct sctp_association *asoc;
1121     union sctp_addr addr;
1122     union sctp_addr *paddr = &addr;
1123     struct sctphdr *sh = sctp_hdr(skb);
1124     union sctp_params params;
1125     struct sctp_init_chunk *init;
1126     struct sctp_af *af;
1127 
1128     /*
1129      * This code will NOT touch anything inside the chunk--it is
1130      * strictly READ-ONLY.
1131      *
1132      * RFC 2960 3  SCTP packet Format
1133      *
1134      * Multiple chunks can be bundled into one SCTP packet up to
1135      * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
1136      * COMPLETE chunks.  These chunks MUST NOT be bundled with any
1137      * other chunk in a packet.  See Section 6.10 for more details
1138      * on chunk bundling.
1139      */
1140 
1141     /* Find the start of the TLVs and the end of the chunk.  This is
1142      * the region we search for address parameters.
1143      */
1144     init = (struct sctp_init_chunk *)skb->data;
1145 
1146     /* Walk the parameters looking for embedded addresses. */
1147     sctp_walk_params(params, init, init_hdr.params) {
1148 
1149         /* Note: Ignoring hostname addresses. */
1150         af = sctp_get_af_specific(param_type2af(params.p->type));
1151         if (!af)
1152             continue;
1153 
1154         if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
1155             continue;
1156 
1157         asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1158         if (asoc)
1159             return asoc;
1160     }
1161 
1162     return NULL;
1163 }
1164 
1165 /* ADD-IP, Section 5.2
1166  * When an endpoint receives an ASCONF Chunk from the remote peer
1167  * special procedures may be needed to identify the association the
1168  * ASCONF Chunk is associated with. To properly find the association
1169  * the following procedures SHOULD be followed:
1170  *
1171  * D2) If the association is not found, use the address found in the
1172  * Address Parameter TLV combined with the port number found in the
1173  * SCTP common header. If found proceed to rule D4.
1174  *
1175  * D2-ext) If more than one ASCONF Chunks are packed together, use the
1176  * address found in the ASCONF Address Parameter TLV of each of the
1177  * subsequent ASCONF Chunks. If found, proceed to rule D4.
1178  */
1179 static struct sctp_association *__sctp_rcv_asconf_lookup(
1180                     struct net *net,
1181                     struct sctp_chunkhdr *ch,
1182                     const union sctp_addr *laddr,
1183                     __be16 peer_port,
1184                     struct sctp_transport **transportp)
1185 {
1186     struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch;
1187     struct sctp_af *af;
1188     union sctp_addr_param *param;
1189     union sctp_addr paddr;
1190 
1191     if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
1192         return NULL;
1193 
1194     /* Skip over the ADDIP header and find the Address parameter */
1195     param = (union sctp_addr_param *)(asconf + 1);
1196 
1197     af = sctp_get_af_specific(param_type2af(param->p.type));
1198     if (unlikely(!af))
1199         return NULL;
1200 
1201     if (!af->from_addr_param(&paddr, param, peer_port, 0))
1202         return NULL;
1203 
1204     return __sctp_lookup_association(net, laddr, &paddr, transportp);
1205 }
1206 
1207 
1208 /* SCTP-AUTH, Section 6.3:
1209 *    If the receiver does not find a STCB for a packet containing an AUTH
1210 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
1211 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
1212 *    association.
1213 *
1214 * This means that any chunks that can help us identify the association need
1215 * to be looked at to find this association.
1216 */
1217 static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
1218                       struct sk_buff *skb,
1219                       const union sctp_addr *laddr,
1220                       struct sctp_transport **transportp)
1221 {
1222     struct sctp_association *asoc = NULL;
1223     struct sctp_chunkhdr *ch;
1224     int have_auth = 0;
1225     unsigned int chunk_num = 1;
1226     __u8 *ch_end;
1227 
1228     /* Walk through the chunks looking for AUTH or ASCONF chunks
1229      * to help us find the association.
1230      */
1231     ch = (struct sctp_chunkhdr *)skb->data;
1232     do {
1233         /* Break out if chunk length is less then minimal. */
1234         if (ntohs(ch->length) < sizeof(*ch))
1235             break;
1236 
1237         ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
1238         if (ch_end > skb_tail_pointer(skb))
1239             break;
1240 
1241         switch (ch->type) {
1242         case SCTP_CID_AUTH:
1243             have_auth = chunk_num;
1244             break;
1245 
1246         case SCTP_CID_COOKIE_ECHO:
1247             /* If a packet arrives containing an AUTH chunk as
1248              * a first chunk, a COOKIE-ECHO chunk as the second
1249              * chunk, and possibly more chunks after them, and
1250              * the receiver does not have an STCB for that
1251              * packet, then authentication is based on
1252              * the contents of the COOKIE-ECHO chunk.
1253              */
1254             if (have_auth == 1 && chunk_num == 2)
1255                 return NULL;
1256             break;
1257 
1258         case SCTP_CID_ASCONF:
1259             if (have_auth || net->sctp.addip_noauth)
1260                 asoc = __sctp_rcv_asconf_lookup(
1261                         net, ch, laddr,
1262                         sctp_hdr(skb)->source,
1263                         transportp);
1264             break;
1265         default:
1266             break;
1267         }
1268 
1269         if (asoc)
1270             break;
1271 
1272         ch = (struct sctp_chunkhdr *)ch_end;
1273         chunk_num++;
1274     } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
1275 
1276     return asoc;
1277 }
1278 
1279 /*
1280  * There are circumstances when we need to look inside the SCTP packet
1281  * for information to help us find the association.   Examples
1282  * include looking inside of INIT/INIT-ACK chunks or after the AUTH
1283  * chunks.
1284  */
1285 static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1286                       struct sk_buff *skb,
1287                       const union sctp_addr *laddr,
1288                       struct sctp_transport **transportp)
1289 {
1290     struct sctp_chunkhdr *ch;
1291 
1292     /* We do not allow GSO frames here as we need to linearize and
1293      * then cannot guarantee frame boundaries. This shouldn't be an
1294      * issue as packets hitting this are mostly INIT or INIT-ACK and
1295      * those cannot be GSO-style anyway.
1296      */
1297     if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
1298         return NULL;
1299 
1300     ch = (struct sctp_chunkhdr *)skb->data;
1301 
1302     /* The code below will attempt to walk the chunk and extract
1303      * parameter information.  Before we do that, we need to verify
1304      * that the chunk length doesn't cause overflow.  Otherwise, we'll
1305      * walk off the end.
1306      */
1307     if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
1308         return NULL;
1309 
1310     /* If this is INIT/INIT-ACK look inside the chunk too. */
1311     if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
1312         return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1313 
1314     return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1315 }
1316 
1317 /* Lookup an association for an inbound skb. */
1318 static struct sctp_association *__sctp_rcv_lookup(struct net *net,
1319                       struct sk_buff *skb,
1320                       const union sctp_addr *paddr,
1321                       const union sctp_addr *laddr,
1322                       struct sctp_transport **transportp)
1323 {
1324     struct sctp_association *asoc;
1325 
1326     asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1327     if (asoc)
1328         goto out;
1329 
1330     /* Further lookup for INIT/INIT-ACK packets.
1331      * SCTP Implementer's Guide, 2.18 Handling of address
1332      * parameters within the INIT or INIT-ACK.
1333      */
1334     asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
1335     if (asoc)
1336         goto out;
1337 
1338     if (paddr->sa.sa_family == AF_INET)
1339         pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
1340              &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
1341              &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
1342     else
1343         pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
1344              &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
1345              &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));
1346 
1347 out:
1348     return asoc;
1349 }