// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}
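
/* Illustrative only (editorial note, not part of the original file): the
 * queue is embedded in the association and set up during association
 * initialization, roughly:
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);
 *
 * and torn down again via sctp_ulpq_free() when the association dies.
 */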

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

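/* Editorial note: the return convention below is inferred from the callers
 * (e.g. sctp_ulpq_renege()): 1 means a complete message (MSG_EOR set)
 * reached the ULP, 0 means the data was queued or only partially
 * delivered, and -ENOMEM means no event could be allocated.
 */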
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}

/* Clear the partial delivery mode for this socket.  Note: this assumes
 * that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
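
/* Editorial note: the non-zero return above signals that queued events
 * were spliced from the lobby to sk_receive_queue in one shot; callers
 * such as sctp_ulpq_abort_pd() appear to use it to decide whether to
 * wake the reader via sk_data_ready().
 */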

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
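
/* Editorial summary of the routing logic above (not in the original file):
 *
 *	socket in PD?  this assoc in PD?  event                      queue
 *	--------------------------------------------------------------------
 *	no             -                  any                        sk_receive_queue
 *	yes            yes                notification/unfragmented  pd_lobby
 *	yes            yes                fragment                   sk_receive_queue
 *	yes            no                 any, frag_interleave set   sk_receive_queue
 *	yes            no                 any, frag_interleave unset pd_lobby
 */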

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short-circuit: if it sorts after the current tail, just drop
	 * it at the end.
	 */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
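
/* Editorial example: if the reasm queue holds TSNs 10, 11 and 13, an
 * arriving TSN 12 fails the tail short-circuit above (13 is not less
 * than 12) and the walk inserts it before 13, keeping the queue sorted
 * by TSN.
 */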

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
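
/* Editorial note: after the helper above runs, the message is one logical
 * skb: f_frag carries the first fragment and every later fragment hangs
 * off skb_shinfo(f_frag)->frag_list, with len/data_len grown to cover the
 * whole message.  No payload is copied, except for the skb_copy() taken
 * when f_frag was cloned.
 */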

/* Helper function to check if an incoming chunk has filled in the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  It will
	 * never be used with this value.  It is referenced only after it
	 * is set, when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
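	/* Editorial worked example: with the queue holding TSNs
	 * 7:FIRST, 8:MIDDLE, 9:LAST, the walk sets first_frag = 7 and
	 * advances next_tsn through 8 and 9, jumping to 'found' at the
	 * LAST fragment.  A hole (say, a missing TSN 8) resets
	 * first_frag and the scan resumes at the next FIRST fragment.
	 */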
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(asoc->base.net,
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
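
/* Editorial note: in partial-delivery mode, retrieval above is attempted
 * only when the new event's TSN does not exceed the cumulative TSN point,
 * i.e. when no holes precede it, so the next in-sequence run of fragments
 * can be handed to the user right away.
 */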

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the sctp_ulpevent
		 * for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
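
/* Editorial example: the lobby orders events by (stream, SSN), so
 * stream 0/SSN 5 sorts before stream 1/SSN 0, and within stream 1,
 * SSN 2 sorts before SSN 3 (modulo SSN wrap-around, handled by
 * SSN_lt()).
 */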

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by a Forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* See if this SSN has been marked by skipping. */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so, ignore it. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
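
/* Editorial note: renege walks the list from the tail, i.e. it prefers
 * giving up the newest data above the cumulative TSN point.  Each
 * reneged TSN is cleared in the peer's tsnmap so it will be reported
 * as missing and, presumably, retransmitted by the sender.
 */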

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message, as there is pressure on the rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			struct sk_buff_head temp;

			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
			sctp_ulpq_tail_event(ulpq, &temp);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}

	/* If able to free enough room, accept this chunk. */
	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
	    freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sctp_sock *sp;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
				       SCTP_PARTIAL_DELIVERY_EVENT))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}