/* SCTP kernel implementation
 * net/sctp/ulpqueue.c
 *
 * This abstraction carries the queue of events that are to be delivered
 * to the Upper Layer Protocol (ULP): it reassembles fragmented messages,
 * restores per-stream ordering by SSN, and implements the partial
 * delivery API and receive-buffer reneging.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}
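
/* Illustrative lifecycle (a sketch, not code from this file): the ULP
 * queue is embedded in the association and lives exactly as long as it
 * does.  The exact call sites named below are an assumption.
 *
 *	sctp_ulpq_init(&asoc->ulpq, asoc);
 *	...receive path delivers events while the association is live...
 *	sctp_ulpq_free(&asoc->ulpq);
 */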

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk.  */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}
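
/* Return-value contract of sctp_ulpq_tail_data(), as read from the code
 * above: -ENOMEM when no event could be allocated; 1 when the event handed
 * to the ULP carries MSG_EOR (a complete reassembled message); 0 when
 * nothing was delivered or the delivered event is a partial (non-EOR) one.
 * sctp_ulpq_renege() below relies on exactly this: retval <= 0 triggers a
 * partial-delivery attempt, retval == 1 drains the reassembly queue.
 */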

/* Clear the partial delivery mode for this socket.  Called when an
 * association leaves partial delivery: if it was the last one in PD the
 * whole lobby is flushed to the receive queue (returning 1), otherwise
 * only the events belonging to that association are pulled out.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq.  */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association the buffer is associated with.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
			     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we can queue
			 * this to the receive queue instead of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
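
/* Queue routing in sctp_ulpq_tail_event(), summarized from the code above
 * (sp->pd_mode is the socket-wide atomic counter, ulpq->pd_mode the
 * per-association flag):
 *
 *	sp->pd_mode == 0                      -> sk_receive_queue
 *	sp->pd_mode != 0, this assoc in PD:
 *	    notification or unfragmented data -> pd_lobby
 *	    continuation of the PD message    -> sk_receive_queue
 *	                                         (MSG_EOR also ends PD)
 *	sp->pd_mode != 0, another assoc in PD:
 *	    frag_interleave set               -> sk_receive_queue
 *	    otherwise                         -> pd_lobby
 */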

/* 2nd Level Abstractions */

/* Helper function to store a fragment on the reassembly queue,
 * which is kept sorted by TSN.
 */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* If the queue is empty, just queue at the tail.  */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end.  */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list; the queue is kept in
	 * ascending TSN order.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos.  */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * in the current reassembly queue.  The skb's may be non-linear if the
 * sctp stack is under memory pressure.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb.  */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present.  */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we made a copy of a cloned skb, free the old one and
	 * continue with the copy.
	 */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
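
/* Resulting layout, as built above: the first fragment's skb represents
 * the whole message, with the remaining fragments chained off its
 * frag_list and their lengths folded into len/data_len (the payload is
 * not copied):
 *
 *	f_frag: ->len / ->data_len cover the full message
 *	  skb_shinfo(f_frag)->frag_list -> frag2 -> frag3 -> ... -> l_frag
 */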

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  It is
	 * referenced only after it is set, when we find the first
	 * fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and 'next_tsn' are reset when we find a chunk that
	 * is the first fragment of a datagram; after that we expect the
	 * middle fragments and the last fragment in TSN sequence.  If the
	 * sequence is broken, the search restarts at the next first
	 * fragment.  Along the way, 'pd_first', 'pd_last' and 'pd_len'
	 * track an in-sequence run at the head of the queue, so partial
	 * delivery can start once 'pd_len' reaches the socket's partial
	 * delivery point.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible partial delivery.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(asoc->base.net,
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the next set of fragments.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
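
/* Worked example of the reassembly path above (a sketch; TSNs invented,
 * partial delivery ignored): fragments with TSN 10 (FIRST), 11 (MIDDLE),
 * 12 (LAST) arrive as 10, 12, 11.  After 10 and 12 the walk in
 * sctp_ulpq_retrieve_reassembled() finds no complete run, so both sit in
 * ulpq->reasm.  When 11 arrives the walk sees 10, 11, 12 in TSN order and
 * hands the three skbs to sctp_make_reassembled_event(), which returns a
 * single event marked MSG_EOR.
 */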

/* Helper function to retrieve the first (possibly incomplete) run of
 * fragments at the head of the reassembly queue, used to start partial
 * delivery.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the run of fragments.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	return retval;
}

/* Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6: after receiving and processing a FORWARD TSN,
 * the data receiver MUST remove any partially reassembled message that
 * is still missing one or more TSNs earlier than or equal to the new
 * cumulative TSN point.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to the cumulative TSN point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/* Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the sctp_ulpevent
		 * for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark it in the stream.  */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  The lobby is kept
 * sorted by stream id and then by SSN.
 */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list; entries are kept in
	 * ascending (stream, SSN) order.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos.  */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
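
/* Worked ordering example (a sketch; stream and SSN values invented): on
 * stream 3 the expected SSN is 5.  If SSN 6 arrives first,
 * sctp_ulpq_order() parks it in the lobby and returns NULL.  When SSN 5
 * arrives it matches sctp_ssn_peek(), the expected SSN advances to 6, and
 * sctp_ulpq_retrieve_ordered() pulls the parked SSN 6 event, so both are
 * delivered in order.
 */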

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* See if this ssn has been marked by skipping.  */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver.  */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward-TSN chunk to skip over abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so, ignore it.  */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower.  */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

/* Renege events from the given queue, newest TSNs first, until at least
 * 'needed' bytes have been freed.  Returns the number of bytes freed.
 */
__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point.  */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in the queue may cover multiple TSNs when they
		 * carry a fragment list, so account for every fragment.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs.  */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
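
/* Renege accounting note, from the loop above: reneging works from the
 * tail (highest TSNs), and stops before touching anything at or below
 * the cumulative TSN ACK point.  A reassembled event that chains
 * fragments through frag_list gives back one TSN per fragment via
 * sctp_tsnmap_renege(), not just the head skb's TSN.
 */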

/* Renege 'needed' bytes from the ordering queue.  */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue.  */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd.  */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			struct sk_buff_head temp;

			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
			sctp_ulpq_tail_event(ulpq, &temp);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}

	/* If able to free enough room, accept this chunk.  */
	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
	    freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}
}

/* Notify the application if an association is aborted while in
 * partial delivery mode, and send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sctp_sock *sp;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
				       SCTP_PARTIAL_DELIVERY_EVENT))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now.  */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}