0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <net/busy_poll.h>
0019 #include <net/sctp/sctp.h>
0020 #include <net/sctp/sm.h>
0021 #include <net/sctp/ulpevent.h>
0022 #include <linux/sctp.h>
0023
0024 static struct sctp_chunk *sctp_make_idatafrag_empty(
0025 const struct sctp_association *asoc,
0026 const struct sctp_sndrcvinfo *sinfo,
0027 int len, __u8 flags, gfp_t gfp)
0028 {
0029 struct sctp_chunk *retval;
0030 struct sctp_idatahdr dp;
0031
0032 memset(&dp, 0, sizeof(dp));
0033 dp.stream = htons(sinfo->sinfo_stream);
0034
0035 if (sinfo->sinfo_flags & SCTP_UNORDERED)
0036 flags |= SCTP_DATA_UNORDERED;
0037
0038 retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
0039 if (!retval)
0040 return NULL;
0041
0042 retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
0043 memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
0044
0045 return retval;
0046 }
0047
/* Assign the Message Identifier (and per-fragment FSNs) to every fragment
 * of the message @chunk belongs to.  Runs once per message: the first
 * fragment that reaches it numbers all siblings and marks them has_mid.
 */
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	/* Already numbered by an earlier sibling's pass. */
	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		/* In I-DATA the first fragment carries the PPID; the same
		 * field holds the Fragment Sequence Number on all later
		 * fragments, counted from 0.
		 */
		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			/* Only the last fragment advances the stream's
			 * outbound unordered MID counter; the others peek
			 * so every fragment shares one MID.
			 */
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}
0086
0087 static bool sctp_validate_data(struct sctp_chunk *chunk)
0088 {
0089 struct sctp_stream *stream;
0090 __u16 sid, ssn;
0091
0092 if (chunk->chunk_hdr->type != SCTP_CID_DATA)
0093 return false;
0094
0095 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
0096 return true;
0097
0098 stream = &chunk->asoc->stream;
0099 sid = sctp_chunk_stream_no(chunk);
0100 ssn = ntohs(chunk->subh.data_hdr->ssn);
0101
0102 return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
0103 }
0104
0105 static bool sctp_validate_idata(struct sctp_chunk *chunk)
0106 {
0107 struct sctp_stream *stream;
0108 __u32 mid;
0109 __u16 sid;
0110
0111 if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
0112 return false;
0113
0114 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
0115 return true;
0116
0117 stream = &chunk->asoc->stream;
0118 sid = sctp_chunk_stream_no(chunk);
0119 mid = ntohl(chunk->subh.idata_hdr->mid);
0120
0121 return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
0122 }
0123
/* Insert @event into the (ordered) reassembly queue, which is kept sorted
 * by (stream, MID, fragment position).  Fast path: most arrivals belong
 * at the tail, so the tail is checked first; only a middle insertion
 * walks the queue.
 */
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	/* Same message as the tail and not positioned before it (anything
	 * follows a FIRST frag; otherwise a non-first frag with a higher
	 * FSN): append.
	 */
	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Strictly after the tail in (stream, MID) order: append. */
	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the first queued event that should come after @event. */
	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}
0179
/* Partial delivery is in progress on @event's stream: try to extend it by
 * pulling the next run of in-sequence fragments (starting at sin->fsn for
 * the in-flight message sin->mid) out of the reassembly queue.
 * Returns the reassembled event (MSG_EOR set if the message finished),
 * or NULL if the next expected fragment has not arrived.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		/* Queue is sorted: past our stream, or a different MID,
		 * means no further entry can match.
		 */
		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* The first fragment was already handed up when
			 * partial delivery started; a FIRST here cannot
			 * belong to the run we want.
			 */
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			/* Message complete: leave partial-delivery mode. */
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}
0256
/* Look in the reassembly queue for a complete message (FIRST..LAST with
 * consecutive FSNs under one MID) matching @event's (stream, MID).  If
 * none is complete but the leading fragments of the next in-order MID
 * already exceed the socket's partial-delivery point, deliver those and
 * enter partial-delivery mode instead.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		/* The queue is sorted by (stream, MID): skip earlier
		 * entries, stop once we have walked past ours.
		 */
		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* Track a partial-delivery candidate only for the
			 * MID that is next in order on this stream.
			 */
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				/* FSN gap: the current run is broken. */
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	/* No complete message: deliver the leading run early once it
	 * passes the partial-delivery threshold (0 disables PD).
	 */
	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}
0347
0348 static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
0349 struct sctp_ulpevent *event)
0350 {
0351 struct sctp_ulpevent *retval = NULL;
0352 struct sctp_stream_in *sin;
0353
0354 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
0355 event->msg_flags |= MSG_EOR;
0356 return event;
0357 }
0358
0359 sctp_intl_store_reasm(ulpq, event);
0360
0361 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
0362 if (sin->pd_mode && event->mid == sin->mid &&
0363 event->fsn == sin->fsn)
0364 retval = sctp_intl_retrieve_partial(ulpq, event);
0365
0366 if (!retval)
0367 retval = sctp_intl_retrieve_reassembled(ulpq, event);
0368
0369 return retval;
0370 }
0371
/* Insert a fully reassembled but out-of-order message into the lobby,
 * kept sorted by (stream, MID).  Tail-insert fast paths first, then a
 * walk to find the insertion point.
 */
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* After the tail in (stream, MID) order: append. */
	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the first queued event that should come after @event. */
	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}
0416
/* After delivering @event, move every lobby message on the same stream
 * that is now next-in-order onto @event's delivery list, advancing the
 * stream's expected MID for each.  The caller stashes the delivery list
 * in the event skb's ->prev pointer (see the skb_list plumbing in
 * sctp_ulpevent).
 */
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		/* Lobby is sorted by (stream, MID). */
		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		/* Stop at the first gap in the MID sequence. */
		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}
0447
0448 static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
0449 struct sctp_ulpevent *event)
0450 {
0451 struct sctp_stream *stream;
0452 __u16 sid;
0453
0454 stream = &ulpq->asoc->stream;
0455 sid = event->stream;
0456
0457 if (event->mid != sctp_mid_peek(stream, in, sid)) {
0458 sctp_intl_store_ordered(ulpq, event);
0459 return NULL;
0460 }
0461
0462 sctp_mid_next(stream, in, sid);
0463
0464 sctp_intl_retrieve_ordered(ulpq, event);
0465
0466 return event;
0467 }
0468
/* Move the events on @skb_list onto the socket receive queue and wake the
 * reader; free them instead if the socket can no longer take data or the
 * event type is not subscribed.  Returns 1 if delivered, 0 if dropped.
 */
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* After RCV_SHUTDOWN only notifications may still be delivered,
	 * and nothing at all once the send side is shut down too.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* NOTE(review): skb_list has already been dereferenced by
	 * __skb_peek() above, so these NULL checks look vestigial —
	 * confirm all callers pass a non-NULL list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
0514
/* Unordered counterpart of sctp_intl_store_reasm(): insert @event into
 * the unordered reassembly queue, kept sorted by (stream, MID, fragment
 * position).  Tail fast paths first.
 */
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	/* Same message as the tail and not positioned before it: append. */
	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	/* Strictly after the tail in (stream, MID) order: append. */
	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	/* Find the first entry that should come after @event.  The tail
	 * checks above guarantee one exists; if the walk ever completed,
	 * pos would be the queue head sentinel and the insert below would
	 * still land at the tail.
	 */
	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}
0563
/* Unordered counterpart of sctp_intl_retrieve_partial(): continue an
 * in-progress unordered partial delivery by pulling the next run of
 * in-sequence fragments of message sin->mid_uo from the unordered
 * reassembly queue.  Returns NULL if the next fragment is missing.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		/* Queue is sorted by (stream, MID): only the entries for
		 * the message being partially delivered matter.
		 */
		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* First fragment was delivered when PD started. */
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			/* Message complete: leave unordered PD mode. */
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}
0644
/* Unordered counterpart of sctp_intl_retrieve_reassembled(): look for a
 * complete message for @event's (stream, MID), or — failing that — start
 * unordered partial delivery once the leading fragments exceed the
 * socket's partial-delivery point.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		/* Restrict the scan to @event's (stream, MID). */
		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* Unordered messages have no expected MID, so a PD
			 * candidate is tracked only while the stream is not
			 * already in unordered PD mode; remember which MID
			 * it belongs to.
			 */
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				/* FSN gap: the current run is broken. */
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	/* No complete message: deliver the leading run early once it
	 * passes the partial-delivery threshold (0 disables PD).
	 */
	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}
0736
0737 static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
0738 struct sctp_ulpevent *event)
0739 {
0740 struct sctp_ulpevent *retval = NULL;
0741 struct sctp_stream_in *sin;
0742
0743 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
0744 event->msg_flags |= MSG_EOR;
0745 return event;
0746 }
0747
0748 sctp_intl_store_reasm_uo(ulpq, event);
0749
0750 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
0751 if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
0752 event->fsn == sin->fsn_uo)
0753 retval = sctp_intl_retrieve_partial_uo(ulpq, event);
0754
0755 if (!retval)
0756 retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
0757
0758 return retval;
0759 }
0760
/* Start unordered partial delivery: find the leading run of fragments
 * (beginning with a FIRST frag) of some unordered message on a stream
 * not already in unordered PD mode, deliver it, and put that stream into
 * unordered PD mode.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* A second FIRST ends the run gathered so far. */
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			/* A complete message would already have been
			 * delivered by normal reassembly; stop here.
			 */
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}
0824
/* Entry point for a freshly received I-DATA chunk: build a ulpevent from
 * it, run it through reassembly (and ordering, if the data is ordered),
 * and enqueue whatever became deliverable.  Returns 1 if a complete
 * message (MSG_EOR) was delivered, 0 otherwise, -ENOMEM on allocation
 * failure.
 */
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* First fragments carry the PPID; the others carry the FSN in the
	 * same header field (see sctp_chunk_assign_mid()).
	 */
	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			/* Only complete messages go through ordering. */
			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}
0866
/* Start ordered partial delivery: find the leading run of fragments of
 * the next in-order message (MID == csin->mid) on a stream not already
 * in PD mode, deliver it, and put that stream into PD mode.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* A second FIRST ends the run gathered so far. */
			if (first_frag)
				goto out;
			/* Ordered PD may only begin at the MID the stream
			 * expects next, or delivery order would break.
			 */
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			/* A complete message would already have been
			 * delivered by normal reassembly; stop here.
			 */
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}
0931
0932 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
0933 {
0934 struct sctp_ulpevent *event;
0935 struct sk_buff_head temp;
0936
0937 if (!skb_queue_empty(&ulpq->reasm)) {
0938 do {
0939 event = sctp_intl_retrieve_first(ulpq);
0940 if (event) {
0941 skb_queue_head_init(&temp);
0942 __skb_queue_tail(&temp, sctp_event2skb(event));
0943 sctp_enqueue_event(ulpq, &temp);
0944 }
0945 } while (event);
0946 }
0947
0948 if (!skb_queue_empty(&ulpq->reasm_uo)) {
0949 do {
0950 event = sctp_intl_retrieve_first_uo(ulpq);
0951 if (event) {
0952 skb_queue_head_init(&temp);
0953 __skb_queue_tail(&temp, sctp_event2skb(event));
0954 sctp_enqueue_event(ulpq, &temp);
0955 }
0956 } while (event);
0957 }
0958 }
0959
/* Renege: when memory is tight, free enough queued-but-undelivered data
 * to make room for @chunk, then process it; if that did not produce a
 * complete message, force partial delivery to drain the queues.
 */
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	/* Payload size of the I-DATA chunk we need room for. */
	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	/* Only renege while nothing is waiting to be read: prefer the
	 * ordering lobby, then the reassembly queues.
	 */
	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	/* If room was made but no full message got delivered, push the
	 * data out via partial delivery.
	 */
	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);
}
0983
0984 static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
0985 __u32 mid, __u16 flags, gfp_t gfp)
0986 {
0987 struct sock *sk = ulpq->asoc->base.sk;
0988 struct sctp_ulpevent *ev = NULL;
0989
0990 if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
0991 SCTP_PARTIAL_DELIVERY_EVENT))
0992 return;
0993
0994 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
0995 sid, mid, flags, gfp);
0996 if (ev) {
0997 struct sctp_sock *sp = sctp_sk(sk);
0998
0999 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1000
1001 if (!sp->data_ready_signalled) {
1002 sp->data_ready_signalled = 1;
1003 sk->sk_data_ready(sk);
1004 }
1005 }
1006 }
1007
/* After a MID skip on stream @sid, flush to the ULP every lobby message
 * on that stream whose MID now precedes the expected one, then deliver
 * the next in-order message too if it is already waiting.
 */
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		/* Lobby is sorted by (stream, MID). */
		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		/* Stop at the first message not already skipped past. */
		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	/* If nothing was reaped, the entry the walk stopped on (pos is the
	 * head sentinel only when the whole lobby was traversed) may be
	 * exactly the next in-order message — deliver it as well.
	 */
	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		/* Chase any further in-order messages before enqueueing. */
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}
1058
/* Abort partial delivery on every inbound stream: notify the user for
 * each stream in PD mode (unordered and ordered), skip past the aborted
 * ordered MIDs, and flush anything that thereby became deliverable.
 */
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			/* Flag 0x1 marks the unordered MID space. */
			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			/* Notify first, then skip the aborted message's
			 * MID and reap anything now in order.
			 */
			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* Everything deliverable goes up to the ULP now. */
	sctp_ulpq_flush(ulpq);
}
1089
1090 static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
1091 int nskips, __be16 stream, __u8 flags)
1092 {
1093 int i;
1094
1095 for (i = 0; i < nskips; i++)
1096 if (skiplist[i].stream == stream &&
1097 skiplist[i].flags == flags)
1098 return i;
1099
1100 return i;
1101 }
1102
/* U bit in an I-FORWARD-TSN skip entry: the MID is unordered. */
#define SCTP_FTSN_U_BIT	0x1
/* Build an I-FORWARD-TSN chunk advancing the peer's ack point past
 * abandoned chunks, collecting up to 10 per-(stream, flags) MID skips.
 */
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* Walk the abandoned queue (kept in TSN order): free chunks the
	 * cumulative ack already covers, absorb the contiguous ones just
	 * beyond it into the advanced ack point, and stop at the first gap.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			/* One slot per (stream, flags); reuse keeps the
			 * highest MID since the queue is TSN-ordered.
			 */
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	/* Only send when the ack point actually moved forward. */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
	}
}
1160
/* Iterate over the skip entries of an I-FORWARD-TSN chunk.
 *
 * The bound admits only entries that fit *entirely* within the chunk:
 * with the previous "pos < skip + (end)" comparison, a malformed chunk
 * whose remaining length was not a multiple of the entry size let the
 * final iteration read past the end of the chunk (out-of-bounds read;
 * fixed upstream in commit 32f8807a48ae, "sctp: fix a potential
 * overflow in sctp_ifwdtsn_skip").
 */
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
			    sizeof(struct sctp_ifwdtsn_skip); pos++)

/* Walk the skips of @ch using the length from its chunk header. */
#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))
1168
1169 static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
1170 {
1171 struct sctp_fwdtsn_skip *skip;
1172 __u16 incnt;
1173
1174 if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
1175 return false;
1176
1177 incnt = chunk->asoc->stream.incnt;
1178 sctp_walk_fwdtsn(skip, chunk)
1179 if (ntohs(skip->stream) >= incnt)
1180 return false;
1181
1182 return true;
1183 }
1184
1185 static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
1186 {
1187 struct sctp_ifwdtsn_skip *skip;
1188 __u16 incnt;
1189
1190 if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
1191 return false;
1192
1193 incnt = chunk->asoc->stream.incnt;
1194 sctp_walk_ifwdtsn(skip, chunk)
1195 if (ntohs(skip->stream) >= incnt)
1196 return false;
1197
1198 return true;
1199 }
1200
/* Process the cumulative-TSN part of a FORWARD-TSN chunk. */
static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge fragments covered by the new cumulative point. */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in-progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}
1210
1211 static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1212 {
1213 struct sk_buff *pos, *tmp;
1214
1215 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
1216 struct sctp_ulpevent *event = sctp_skb2event(pos);
1217 __u32 tsn = event->tsn;
1218
1219 if (TSN_lte(tsn, ftsn)) {
1220 __skb_unlink(pos, &ulpq->reasm);
1221 sctp_ulpevent_free(event);
1222 }
1223 }
1224
1225 skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
1226 struct sctp_ulpevent *event = sctp_skb2event(pos);
1227 __u32 tsn = event->tsn;
1228
1229 if (TSN_lte(tsn, ftsn)) {
1230 __skb_unlink(pos, &ulpq->reasm_uo);
1231 sctp_ulpevent_free(event);
1232 }
1233 }
1234 }
1235
/* Process the cumulative-TSN part of an I-FORWARD-TSN chunk. */
static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge fragments covered by the new cumulative point. */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* Abort partial delivery only when the skip covers all data seen. */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}
1246
/* Apply the per-stream SSN skips carried by a FORWARD-TSN chunk. */
static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs. */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}
1255
/* Apply one I-FORWARD-TSN skip entry: abandon message @mid on stream
 * @sid.  The U bit in @flags selects the unordered MID space.
 */
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		/* Unordered: there is no expected-MID sequence to advance;
		 * just abort a now-stale unordered partial delivery.
		 */
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	/* Nothing to do if the stream is already past this MID. */
	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	/* Advance past the skipped message and deliver any lobby messages
	 * that are now in order.
	 */
	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}
1283
/* Apply the per-stream MID skips carried by an I-FORWARD-TSN chunk. */
static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs. */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}
1293
1294 static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
1295 {
1296 struct sk_buff_head temp;
1297
1298 skb_queue_head_init(&temp);
1299 __skb_queue_tail(&temp, sctp_event2skb(event));
1300 return sctp_ulpq_tail_event(ulpq, &temp);
1301 }
1302
/* Operations used when user-message interleaving was NOT negotiated:
 * classic DATA / FORWARD-TSN processing with SSN-based ordering.
 */
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
	/* DATA chunk processing */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = do_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
	/* FORWARD-TSN chunk processing */
	.generate_ftsn = sctp_generate_fwdtsn,
	.validate_ftsn = sctp_validate_fwdtsn,
	.report_ftsn = sctp_report_fwdtsn,
	.handle_ftsn = sctp_handle_fwdtsn,
};
1321
1322 static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
1323 struct sctp_ulpevent *event)
1324 {
1325 struct sk_buff_head temp;
1326
1327 skb_queue_head_init(&temp);
1328 __skb_queue_tail(&temp, sctp_event2skb(event));
1329 return sctp_enqueue_event(ulpq, &temp);
1330 }
1331
/* Operations used when user-message interleaving WAS negotiated:
 * I-DATA / I-FORWARD-TSN processing with MID-based ordering (RFC 8260).
 */
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA chunk processing */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = do_sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
	/* I-FORWARD-TSN chunk processing */
	.generate_ftsn = sctp_generate_iftsn,
	.validate_ftsn = sctp_validate_iftsn,
	.report_ftsn = sctp_report_iftsn,
	.handle_ftsn = sctp_handle_iftsn,
};
1350
1351 void sctp_stream_interleave_init(struct sctp_stream *stream)
1352 {
1353 struct sctp_association *asoc;
1354
1355 asoc = container_of(stream, struct sctp_association, stream);
1356 stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
1357 : &sctp_stream_interleave_0;
1358 }