/* SCTP kernel implementation -- association handling.
 *
 * This module provides the abstraction for an SCTP association:
 * creation and teardown, peer transport management, path selection,
 * receive window accounting, and the association-level receive path.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

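/* Initialize a new association from provided memory.
 * Pulls defaults from the endpoint and socket, sets up queues and
 * timers, and takes references on @ep and @sk.  Returns @asoc on
 * success or NULL on failure.
 */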
static struct sctp_association *sctp_association_init(
                                        struct sctp_association *asoc,
                                        const struct sctp_endpoint *ep,
                                        const struct sock *sk,
                                        enum sctp_scope scope, gfp_t gfp)
{
        struct sctp_sock *sp;
        struct sctp_paramhdr *p;
        int i;

        /* Retrieve the SCTP per socket area. */
        sp = sctp_sk((struct sock *)sk);

        /* Discarding const is appropriate here. */
        asoc->ep = (struct sctp_endpoint *)ep;
        asoc->base.sk = (struct sock *)sk;
        asoc->base.net = sock_net(sk);

        sctp_endpoint_hold(asoc->ep);
        sock_hold(asoc->base.sk);

        /* Initialize the common base substructure. */
        asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

        /* Initialize the object handling fields. */
        refcount_set(&asoc->base.refcnt, 1);

        /* Initialize the bind addr area. */
        sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

        asoc->state = SCTP_STATE_CLOSED;
        asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
        asoc->user_frag = sp->user_frag;

        /* Set the association max_retrans and RTO values from the
         * socket values.
         */
        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
        asoc->pf_retrans = sp->pf_retrans;
        asoc->ps_retrans = sp->ps_retrans;
        asoc->pf_expose = sp->pf_expose;

        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
        asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

        /* Initialize the association's heartbeat interval based on the
         * sock configured value.
         */
        asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
        asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);

        asoc->encap_port = sp->encap_port;

        /* Initialize path max retrans value. */
        asoc->pathmaxrxt = sp->pathmaxrxt;

        asoc->flowlabel = sp->flowlabel;
        asoc->dscp = sp->dscp;

        /* Set association default SACK delay and frequency. */
        asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
        asoc->sackfreq = sp->sackfreq;

        /* Set the association default flags controlling
         * Heartbeat, SACK delay, and Path MTU Discovery.
         */
        asoc->param_flags = sp->param_flags;

        /* Initialize the maximum number of new data packets that can be
         * sent in a burst.
         */
        asoc->max_burst = sp->max_burst;

        asoc->subscribe = sp->subscribe;

        /* Initialize the timeout intervals. */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

        /* The SHUTDOWN guard timer is set to the recommended value of
         * 5 times RTO.Max.
         */
        asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
                = 5 * asoc->rto_max;

        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

        /* Set up the per-association timers. */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
                timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

        /* Pull default initialization values from the sock options.
         * Note: This assumes that the values have already been
         * validated in the sock.
         */
        asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
        asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
        asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

        asoc->max_init_timeo =
                 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

        /* Set the local receive window size.  This is also the receive
         * buffer space available per association; never advertise less
         * than the minimum SCTP window.
         */
        if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
                asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
        else
                asoc->rwnd = sk->sk_rcvbuf/2;

        asoc->a_rwnd = asoc->rwnd;

        /* Use my own max window until I learn something better. */
        asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

        /* Initialize the receive memory counter. */
        atomic_set(&asoc->rmem_alloc, 0);

        init_waitqueue_head(&asoc->wait);

        asoc->c.my_vtag = sctp_generate_tag(ep);
        asoc->c.my_port = ep->base.bind_addr.port;

        asoc->c.initial_tsn = sctp_generate_tsn(ep);

        asoc->next_tsn = asoc->c.initial_tsn;

        asoc->ctsn_ack_point = asoc->next_tsn - 1;
        asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
        asoc->highest_sacked = asoc->ctsn_ack_point;
        asoc->last_cwr_tsn = asoc->ctsn_ack_point;

        /* Per the ADD-IP extension, the ASCONF serial number is
         * initialized at the start of the association to the same value
         * as the initial TSN; the stream reset out-sequence number is
         * initialized the same way.
         */
        asoc->addip_serial = asoc->c.initial_tsn;
        asoc->strreset_outseq = asoc->c.initial_tsn;

        INIT_LIST_HEAD(&asoc->addip_chunk_list);
        INIT_LIST_HEAD(&asoc->asconf_ack_list);

        /* Make an empty list of remote transport addresses. */
        INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

        /* Start out needing to send a SACK, so the first inbound DATA
         * chunk of the association is acknowledged right away.
         */
        asoc->peer.sack_needed = 1;
        asoc->peer.sack_generation = 1;

        /* Create an input queue. */
        sctp_inq_init(&asoc->base.inqueue);
        sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

        /* Create an output queue. */
        sctp_outq_init(asoc, &asoc->outqueue);

        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;

        if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
                goto stream_free;

        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
        sctp_assoc_update_frag_point(asoc);

        /* Assume that the peer will support both address types unless
         * we are told otherwise.
         */
        asoc->peer.ipv4_address = 1;
        if (asoc->base.sk->sk_family == PF_INET6)
                asoc->peer.ipv6_address = 1;
        INIT_LIST_HEAD(&asoc->asocs);

        asoc->default_stream = sp->default_stream;
        asoc->default_ppid = sp->default_ppid;
        asoc->default_flags = sp->default_flags;
        asoc->default_context = sp->default_context;
        asoc->default_timetolive = sp->default_timetolive;
        asoc->default_rcv_context = sp->default_rcv_context;

        /* SCTP-AUTH related initializations. */
        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
        if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
                goto stream_free;

        asoc->active_key_id = ep->active_key_id;
        asoc->strreset_enable = ep->strreset_enable;

        /* Save the hmacs and chunks lists into this association. */
        if (ep->auth_hmacs_list)
                memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
                        ntohs(ep->auth_hmacs_list->param_hdr.length));
        if (ep->auth_chunk_list)
                memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
                        ntohs(ep->auth_chunk_list->param_hdr.length));

        /* Generate the AUTH random parameter for this association. */
        p = (struct sctp_paramhdr *)asoc->c.auth_random;
        p->type = SCTP_PARAM_RANDOM;
        p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
        get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

        return asoc;

stream_free:
        sctp_stream_free(&asoc->stream);
fail_init:
        sock_put(asoc->base.sk);
        sctp_endpoint_put(asoc->ep);
        return NULL;
}

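/* Allocate and initialize a new association. */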
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
                                              const struct sock *sk,
                                              enum sctp_scope scope, gfp_t gfp)
{
        struct sctp_association *asoc;

        asoc = kzalloc(sizeof(*asoc), gfp);
        if (!asoc)
                goto fail;

        if (!sctp_association_init(asoc, ep, sk, scope, gfp))
                goto fail_init;

        SCTP_DBG_OBJCNT_INC(assoc);

        pr_debug("Created asoc %p\n", asoc);

        return asoc;

fail_init:
        kfree(asoc);
fail:
        return NULL;
}

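/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed until the last reference is
 * dropped in sctp_association_put().
 */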
void sctp_association_free(struct sctp_association *asoc)
{
        struct sock *sk = asoc->base.sk;
        struct sctp_transport *transport;
        struct list_head *pos, *temp;
        int i;

        /* Only real associations count against the endpoint, so skip
         * the bookkeeping if this is a temporary association that was
         * never added to the endpoint's list.
         */
        if (!list_empty(&asoc->asocs)) {
                list_del(&asoc->asocs);

                /* Decrement the backlog value for a TCP-style listening
                 * socket.
                 */
                if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
                        sk_acceptq_removed(sk);
        }

        /* Mark as dead, so other users can know this structure is
         * going away.
         */
        asoc->base.dead = true;

        /* Dispose of any data lying around in the outqueue. */
        sctp_outq_free(&asoc->outqueue);

        /* Dispose of any pending messages for the upper layer. */
        sctp_ulpq_free(&asoc->ulpq);

        /* Dispose of any pending chunks on the inqueue. */
        sctp_inq_free(&asoc->base.inqueue);

        sctp_tsnmap_free(&asoc->peer.tsn_map);

        /* Free stream information. */
        sctp_stream_free(&asoc->stream);

        if (asoc->strreset_chunk)
                sctp_chunk_free(asoc->strreset_chunk);

        /* Clean up the bound address list. */
        sctp_bind_addr_free(&asoc->base.bind_addr);

        /* Delete all pending timers.  Each armed timer holds a
         * reference on the association, so drop that reference for
         * every timer actually deleted.
         */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
                if (del_timer(&asoc->timers[i]))
                        sctp_association_put(asoc);
        }

        /* Free peer's cached cookie and AUTH parameters. */
        kfree(asoc->peer.cookie);
        kfree(asoc->peer.peer_random);
        kfree(asoc->peer.peer_chunks);
        kfree(asoc->peer.peer_hmacs);

        /* Release the transport structures. */
        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
                list_del_rcu(pos);
                sctp_unhash_transport(transport);
                sctp_transport_free(transport);
        }

        asoc->peer.transport_count = 0;

        sctp_asconf_queue_teardown(asoc);

        /* Free pending address space being deleted. */
        kfree(asoc->asconf_addr_del_pending);

        /* AUTH - Free the endpoint shared keys. */
        sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

        /* AUTH - Free the association shared key. */
        sctp_auth_key_put(asoc->asoc_shared_key);

        sctp_association_put(asoc);
}

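/* Cleanup and free up an association. */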
static void sctp_association_destroy(struct sctp_association *asoc)
{
        if (unlikely(!asoc->base.dead)) {
                WARN(1, "Attempt to destroy undead association %p!\n", asoc);
                return;
        }

        sctp_endpoint_put(asoc->ep);
        sock_put(asoc->base.sk);

        if (asoc->assoc_id != 0) {
                spin_lock_bh(&sctp_assocs_id_lock);
                idr_remove(&sctp_assocs_id, asoc->assoc_id);
                spin_unlock_bh(&sctp_assocs_id_lock);
        }

        WARN_ON(atomic_read(&asoc->rmem_alloc));

        kfree_rcu(asoc, rcu);
        SCTP_DBG_OBJCNT_DEC(assoc);
}

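/* Change the primary destination address for the peer.  This also
 * updates the active path and, when a real changeover happens, the
 * SFR-CACC changeover bookkeeping on the new primary transport.
 */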
void sctp_assoc_set_primary(struct sctp_association *asoc,
                            struct sctp_transport *transport)
{
        int changeover = 0;

        /* Record whether this is an actual changeover of the primary
         * path to a different transport.
         */
        if (asoc->peer.primary_path != NULL &&
            asoc->peer.primary_path != transport)
                changeover = 1;

        asoc->peer.primary_path = transport;
        sctp_ulpevent_notify_peer_addr_change(transport,
                                              SCTP_ADDR_MADE_PRIM, 0);

        /* Set a default msg_name for events. */
        memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
               sizeof(union sctp_addr));

        /* If the primary path is changing, assume that the
         * user wants to use this new path.
         */
        if ((transport->state == SCTP_ACTIVE) ||
            (transport->state == SCTP_UNKNOWN))
                asoc->peer.active_path = transport;

        /* SFR-CACC: on a switch of the primary destination, the sender
         * records the changeover on the new primary.  Only bother if we
         * have data queued or outstanding on the association.
         */
        if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
                return;

        /* If CHANGEOVER_ACTIVE is already set, this switch is a double
         * switch (cycling changeover) to the same destination.
         */
        if (transport->cacc.changeover_active)
                transport->cacc.cycling_changeover = changeover;

        /* Mark that a changeover has occurred on this transport. */
        transport->cacc.changeover_active = changeover;

        /* Remember the next TSN to be sent at the time of the change. */
        transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

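/* Remove a transport from an association. */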
void sctp_assoc_rm_peer(struct sctp_association *asoc,
                        struct sctp_transport *peer)
{
        struct sctp_transport *transport;
        struct list_head *pos;
        struct sctp_chunk *ch;

        pr_debug("%s: association:%p addr:%pISpc\n",
                 __func__, asoc, &peer->ipaddr.sa);

        /* If we are removing the current retran_path, update it to the
         * next peer before removing this peer from the list.
         */
        if (asoc->peer.retran_path == peer)
                sctp_assoc_update_retran_path(asoc);

        /* Remove this peer from the list. */
        list_del_rcu(&peer->transports);

        /* Remove this peer from the transport hashtable. */
        sctp_unhash_transport(peer);

        /* Get the first transport of asoc. */
        pos = asoc->peer.transport_addr_list.next;
        transport = list_entry(pos, struct sctp_transport, transports);

        /* Update any entries that match the peer to be deleted. */
        if (asoc->peer.primary_path == peer)
                sctp_assoc_set_primary(asoc, transport);
        if (asoc->peer.active_path == peer)
                asoc->peer.active_path = transport;
        if (asoc->peer.retran_path == peer)
                asoc->peer.retran_path = transport;
        if (asoc->peer.last_data_from == peer)
                asoc->peer.last_data_from = transport;

        if (asoc->strreset_chunk &&
            asoc->strreset_chunk->transport == peer) {
                asoc->strreset_chunk->transport = transport;
                sctp_transport_reset_reconf_timer(transport);
        }

        /* If we remove the transport an INIT was last sent to, set it
         * to NULL.  Combined with the update of the retran path above,
         * this causes the next INIT to be sent to the next available
         * transport, maintaining the cycle.
         */
        if (asoc->init_last_sent_to == peer)
                asoc->init_last_sent_to = NULL;

        /* Likewise for the transport a SHUTDOWN was last sent to, so
         * the next SHUTDOWN goes to the next available transport.
         */
        if (asoc->shutdown_last_sent_to == peer)
                asoc->shutdown_last_sent_to = NULL;

        /* If we remove the transport an ASCONF was last sent to, set it
         * to NULL.
         */
        if (asoc->addip_last_asconf &&
            asoc->addip_last_asconf->transport == peer)
                asoc->addip_last_asconf->transport = NULL;

        /* If we have something on the transmitted list, we have to
         * save it off.  The best place is the active path.
         */
        if (!list_empty(&peer->transmitted)) {
                struct sctp_transport *active = asoc->peer.active_path;

                /* Reset the transport of each chunk on this list. */
                list_for_each_entry(ch, &peer->transmitted,
                                    transmitted_list) {
                        ch->transport = NULL;
                        ch->rtt_in_progress = 0;
                }

                list_splice_tail_init(&peer->transmitted,
                                      &active->transmitted);

                /* Start a T3 timer here in case it wasn't running so
                 * that these migrated packets have a chance to get
                 * retransmitted.
                 */
                if (!timer_pending(&active->T3_rtx_timer))
                        if (!mod_timer(&active->T3_rtx_timer,
                                        jiffies + active->rto))
                                sctp_transport_hold(active);
        }

        list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
                if (ch->transport == peer)
                        ch->transport = NULL;

        asoc->peer.transport_count--;

        sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
        sctp_transport_free(peer);
}

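/* Add a transport address to an association.  Returns the existing
 * transport if the address is already known, the newly created one
 * otherwise, or NULL on failure.
 */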
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                                           const union sctp_addr *addr,
                                           const gfp_t gfp,
                                           const int peer_state)
{
        struct sctp_transport *peer;
        struct sctp_sock *sp;
        unsigned short port;

        sp = sctp_sk(asoc->base.sk);

        /* AF_INET and AF_INET6 share a common port field. */
        port = ntohs(addr->v4.sin_port);

        pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
                 asoc, &addr->sa, peer_state);

        /* Set the port if it has not been set yet. */
        if (0 == asoc->peer.port)
                asoc->peer.port = port;

        /* Check to see if this is a duplicate. */
        peer = sctp_assoc_lookup_paddr(asoc, addr);
        if (peer) {
                /* An UNKNOWN state is only set on transports added by
                 * the user in a sctp_connectx() call.  Such transports
                 * should be considered CONFIRMED per RFC 4960,
                 * Section 5.4.
                 */
                if (peer->state == SCTP_UNKNOWN) {
                        peer->state = SCTP_ACTIVE;
                }
                return peer;
        }

        peer = sctp_transport_new(asoc->base.net, addr, gfp);
        if (!peer)
                return NULL;

        sctp_transport_set_owner(peer, asoc);

        /* Initialize the peer's heartbeat interval based on the
         * association configured value.
         */
        peer->hbinterval = asoc->hbinterval;
        peer->probe_interval = asoc->probe_interval;

        peer->encap_port = asoc->encap_port;

        /* Set the path max_retrans. */
        peer->pathmaxrxt = asoc->pathmaxrxt;

        /* And the partial failure retrans threshold. */
        peer->pf_retrans = asoc->pf_retrans;
        /* And the primary path switchover retrans threshold. */
        peer->ps_retrans = asoc->ps_retrans;

        /* Initialize the peer's SACK delay timeout based on the
         * association configured value.
         */
        peer->sackdelay = asoc->sackdelay;
        peer->sackfreq = asoc->sackfreq;

        if (addr->sa.sa_family == AF_INET6) {
                __be32 info = addr->v6.sin6_flowinfo;

                if (info) {
                        peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
                        peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                } else {
                        peer->flowlabel = asoc->flowlabel;
                }
        }
        peer->dscp = asoc->dscp;

        /* Enable/disable heartbeat, SACK delay, and path MTU discovery
         * based on association setting.
         */
        peer->param_flags = asoc->param_flags;

        /* Initialize the pmtu of the transport. */
        sctp_transport_route(peer, NULL, sp);

        /* If this is the first transport addr on this association,
         * initialize the association PMTU to the peer's PMTU.
         * If not and the current association PMTU is higher than the new
         * peer's PMTU, reset the association PMTU to the new peer's PMTU.
         */
        sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
                                  min_t(int, peer->pathmtu, asoc->pathmtu) :
                                  peer->pathmtu);

        peer->pmtu_pending = 0;

        /* The asoc->peer.port might not be meaningful yet, but
         * initialize the packet structure anyway.
         */
        sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
                         asoc->peer.port);

        /* RFC 4960, 7.2.1: the initial cwnd before DATA transmission or
         * after a sufficiently long idle period MUST be set to
         * min(4*MTU, max(2*MTU, 4380 bytes)).
         */
        peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

        /* Start ssthresh at an arbitrarily high value: the maximum
         * receiver advertised window.
         */
        peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

        peer->partial_bytes_acked = 0;
        peer->flight_size = 0;
        peer->burst_limited = 0;

        /* Set the transport's RTO.initial value. */
        peer->rto = asoc->rto_initial;
        sctp_max_rto(asoc, peer);

        /* Set the peer's active state. */
        peer->state = peer_state;

        /* Add this peer into the transport hashtable. */
        if (sctp_hash_transport(peer)) {
                sctp_transport_free(peer);
                return NULL;
        }

        sctp_transport_pl_reset(peer);

        /* Attach the remote transport to our asoc. */
        list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
        asoc->peer.transport_count++;

        sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);

        /* If we do not yet have a primary path, set one. */
        if (!asoc->peer.primary_path) {
                sctp_assoc_set_primary(asoc, peer);
                asoc->peer.retran_path = peer;
        }

        if (asoc->peer.active_path == asoc->peer.retran_path &&
            peer->state != SCTP_UNCONFIRMED) {
                asoc->peer.retran_path = peer;
        }

        return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
                         const union sctp_addr *addr)
{
        struct list_head *pos;
        struct list_head *temp;
        struct sctp_transport *transport;

        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
                if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
                        /* Do book keeping for removing the peer and free it. */
                        sctp_assoc_rm_peer(asoc, transport);
                        break;
                }
        }
}

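/* Lookup a transport by address. */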
struct sctp_transport *sctp_assoc_lookup_paddr(
                                        const struct sctp_association *asoc,
                                        const union sctp_addr *address)
{
        struct sctp_transport *t;

        /* Cycle through all transports searching for a peer address. */
        list_for_each_entry(t, &asoc->peer.transport_addr_list,
                        transports) {
                if (sctp_cmp_addr_exact(address, &t->ipaddr))
                        return t;
        }

        return NULL;
}

/* Remove all transports except a given one. */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
                                     struct sctp_transport *primary)
{
        struct sctp_transport *temp;
        struct sctp_transport *t;

        list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
                                 transports) {
                /* If this is not the primary, delete it. */
                if (t != primary)
                        sctp_assoc_rm_peer(asoc, t);
        }
}

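/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */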
void sctp_assoc_control_transport(struct sctp_association *asoc,
                                  struct sctp_transport *transport,
                                  enum sctp_transport_cmd command,
                                  sctp_sn_error_t error)
{
        int spc_state = SCTP_ADDR_AVAILABLE;
        bool ulp_notify = true;

        /* Record the transition on the transport. */
        switch (command) {
        case SCTP_TRANSPORT_UP:
                /* If we are moving from UNCONFIRMED due to heartbeat
                 * success, report SCTP_ADDR_CONFIRMED to the user;
                 * suppress the notification when leaving the PF state
                 * and PF exposure is not enabled.
                 */
                if (transport->state == SCTP_PF &&
                    asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
                        ulp_notify = false;
                else if (transport->state == SCTP_UNCONFIRMED &&
                         error == SCTP_HEARTBEAT_SUCCESS)
                        spc_state = SCTP_ADDR_CONFIRMED;

                transport->state = SCTP_ACTIVE;
                sctp_transport_pl_reset(transport);
                break;

        case SCTP_TRANSPORT_DOWN:
                /* If the transport was never confirmed, do not transition
                 * it to inactive state.  Also, release the cached route
                 * since there may be a better route next time.
                 */
                if (transport->state != SCTP_UNCONFIRMED) {
                        transport->state = SCTP_INACTIVE;
                        sctp_transport_pl_reset(transport);
                        spc_state = SCTP_ADDR_UNREACHABLE;
                } else {
                        sctp_transport_dst_release(transport);
                        ulp_notify = false;
                }
                break;

        case SCTP_TRANSPORT_PF:
                transport->state = SCTP_PF;
                if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
                        ulp_notify = false;
                else
                        spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
                break;

        default:
                return;
        }

        /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
         * user.
         */
        if (ulp_notify)
                sctp_ulpevent_notify_peer_addr_change(transport,
                                                      spc_state, error);

        /* Select new active and retran paths. */
        sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
        refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and clean up if this was the
 * last reference.
 */
void sctp_association_put(struct sctp_association *asoc)
{
        if (refcount_dec_and_test(&asoc->base.refcnt))
                sctp_association_destroy(asoc);
}

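/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */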
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
        /* Serial Number Arithmetic (RFC 4960, Section 1.6): TSNs wrap
         * around when they reach 2**32 - 1; the next TSN used after
         * 2**32 - 1 is 0.
         */
        __u32 retval = asoc->next_tsn;
        asoc->next_tsn++;
        asoc->unack_data++;

        return retval;
}

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
                        const union sctp_addr *ss2)
{
        struct sctp_af *af;

        af = sctp_get_af_specific(ss1->sa.sa_family);
        if (unlikely(!af))
                return 0;

        return af->cmp_addr(ss1, ss2);
}

/* Return an ECNE chunk to get prepended to a packet, if the association
 * has an ECN Echo pending.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
        if (!asoc->need_ecne)
                return NULL;

        /* Send ECNE if needed.
         * Not being able to allocate a chunk here is not deadly.
         */
        return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

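/* Find which transport this TSN was sent on. */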
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
                                             __u32 tsn)
{
        struct sctp_transport *active;
        struct sctp_transport *match;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        __be32 key = htonl(tsn);

        match = NULL;

        /* The general strategy is to search each transport's transmitted
         * list for the TSN and return the transport it lives on.  Check
         * the active path first, since that is where most outstanding
         * chunks are expected to be.
         */
        active = asoc->peer.active_path;

        list_for_each_entry(chunk, &active->transmitted,
                        transmitted_list) {

                if (key == chunk->subh.data_hdr->tsn) {
                        match = active;
                        goto out;
                }
        }

        /* If not found, go search all the other transports. */
        list_for_each_entry(transport, &asoc->peer.transport_addr_list,
                        transports) {

                if (transport == active)
                        continue;
                list_for_each_entry(chunk, &transport->transmitted,
                                transmitted_list) {
                        if (key == chunk->subh.data_hdr->tsn) {
                                match = transport;
                                goto out;
                        }
                }
        }
out:
        return match;
}

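/* Do delayed input processing.  This is scheduled by sctp_rcv(); chunks
 * queued on the association's inqueue are popped one at a time and run
 * through the state machine.
 */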
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
        struct sctp_association *asoc =
                container_of(work, struct sctp_association,
                             base.inqueue.immediate);
        struct net *net = asoc->base.net;
        union sctp_subtype subtype;
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
        int first_time = 1;     /* is this the first time through the loop */
        int error = 0;
        int state;

        /* The association should be held so we should be safe. */
        ep = asoc->ep;

        inqueue = &asoc->base.inqueue;
        sctp_association_hold(asoc);
        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
                state = asoc->state;
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

                /* If the first chunk in the packet is AUTH, do special
                 * processing as specified in Section 6.3 of SCTP-AUTH.
                 */
                if (first_time && subtype.chunk == SCTP_CID_AUTH) {
                        struct sctp_chunkhdr *next_hdr;

                        next_hdr = sctp_inq_peek(inqueue);
                        if (!next_hdr)
                                goto normal;

                        /* If the next chunk is COOKIE-ECHO, skip the AUTH
                         * chunk while saving a pointer to it so we can do
                         * Authentication later (during cookie-echo
                         * processing).
                         */
                        if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
                                chunk->auth_chunk = skb_clone(chunk->skb,
                                                              GFP_ATOMIC);
                                chunk->auth = 1;
                                continue;
                        }
                }

normal:
                /* SCTP-AUTH, Section 6.3: chunk types that the peer has
                 * asked to receive only after an AUTH chunk MUST be
                 * silently discarded if they are not placed after an AUTH
                 * chunk in the packet.
                 */
                if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
                        continue;

                /* Remember where the last DATA chunk came from so we
                 * know where to send the SACK.
                 */
                if (sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else {
                        SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
                        asoc->stats.ictrlchunks++;
                        if (chunk->chunk_hdr->type == SCTP_CID_SACK)
                                asoc->stats.isacks++;
                }

                if (chunk->transport)
                        chunk->transport->last_time_heard = ktime_get();

                /* Run through the state machine. */
                error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
                                   state, ep, asoc, chunk, GFP_ATOMIC);

                /* Check to see if the association is freed in response to
                 * the incoming chunk.  If so, get out of the while loop.
                 */
                if (asoc->base.dead)
                        break;

                /* If there is an error on chunk, discard this packet. */
                if (error && chunk)
                        chunk->pdiscard = 1;

                if (first_time)
                        first_time = 0;
        }
        sctp_association_put(asoc);
}

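/* This routine moves an association from its old sk to a new sk. */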
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
        struct sctp_sock *newsp = sctp_sk(newsk);
        struct sock *oldsk = assoc->base.sk;

        /* Delete the association from the old endpoint's list of
         * associations.
         */
        list_del_init(&assoc->asocs);

        /* Decrement the backlog value for a TCP-style socket. */
        if (sctp_style(oldsk, TCP))
                sk_acceptq_removed(oldsk);

        /* Release references to the old endpoint and the sock. */
        sctp_endpoint_put(assoc->ep);
        sock_put(assoc->base.sk);

        /* Get a reference to the new endpoint. */
        assoc->ep = newsp->ep;
        sctp_endpoint_hold(assoc->ep);

        /* Get a reference to the new sock. */
        assoc->base.sk = newsk;
        sock_hold(assoc->base.sk);

        /* Add the association to the new endpoint's list of associations. */
        sctp_endpoint_add_asoc(newsp->ep, assoc);
}

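/* Update an association (possibly from unexpected COOKIE-ECHO
 * processing), folding the information carried in @new into @asoc.
 */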
int sctp_assoc_update(struct sctp_association *asoc,
                      struct sctp_association *new)
{
        struct sctp_transport *trans;
        struct list_head *pos, *temp;

        /* Copy in new parameters of peer. */
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
        asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;

        if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                              asoc->peer.i.initial_tsn, GFP_ATOMIC))
                return -ENOMEM;

        /* Remove any peer addresses not present in the new association. */
        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
                trans = list_entry(pos, struct sctp_transport, transports);
                if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
                        sctp_assoc_rm_peer(asoc, trans);
                        continue;
                }

                if (asoc->state >= SCTP_STATE_ESTABLISHED)
                        sctp_transport_reset(trans);
        }

        /* If the association is already established (restart case), take
         * over the new TSN state; otherwise keep the current next_tsn in
         * case data sent to the peer has been discarded and needs
         * retransmission.
         */
        if (asoc->state >= SCTP_STATE_ESTABLISHED) {
                asoc->next_tsn = new->next_tsn;
                asoc->ctsn_ack_point = new->ctsn_ack_point;
                asoc->adv_peer_ack_point = new->adv_peer_ack_point;

                /* Reinitialize the SSNs for both local streams
                 * and the peer's streams.
                 */
                sctp_stream_clear(&asoc->stream);

                /* Flush the ULP reassembly and ordered queues.
                 * Any data there will also be consumed by the
                 * peer during the restart.
                 */
                sctp_ulpq_flush(&asoc->ulpq);

                /* Reset the overall association error count so
                 * that the restarted association doesn't get torn
                 * down immediately.
                 */
                asoc->overall_error_count = 0;

        } else {
                /* Add any peer addresses from the new association. */
                list_for_each_entry(trans, &new->peer.transport_addr_list,
                                    transports)
                        if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
                            !sctp_assoc_add_peer(asoc, &trans->ipaddr,
                                                 GFP_ATOMIC, trans->state))
                                return -ENOMEM;

                asoc->ctsn_ack_point = asoc->next_tsn - 1;
                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

                if (sctp_state(asoc, COOKIE_WAIT))
                        sctp_stream_update(&asoc->stream, &new->stream);

                /* Get a new assoc id if we don't have one yet. */
                if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
                        return -ENOMEM;
        }

        /* SCTP-AUTH: Save the peer parameters from the new association
         * and also move the association shared keys over.
         */
        kfree(asoc->peer.peer_random);
        asoc->peer.peer_random = new->peer.peer_random;
        new->peer.peer_random = NULL;

        kfree(asoc->peer.peer_chunks);
        asoc->peer.peer_chunks = new->peer.peer_chunks;
        new->peer.peer_chunks = NULL;

        kfree(asoc->peer.peer_hmacs);
        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
        new->peer.peer_hmacs = NULL;

        return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
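/* Update the retran path for sending a retransmitted packet.
 * See also RFC 4960, 6.4 "Multi-Homed SCTP Endpoints": when the primary
 * path becomes inactive, the endpoint should try an alternate active
 * destination, and when retransmitting it should attempt to pick a
 * destination divergent from the one the data was originally sent to.
 * The strategy below is to round-robin transports by priority according
 * to sctp_trans_score(): prefer SCTP_ACTIVE, then SCTP_UNKNOWN, then
 * SCTP_PF, using error counts and last-time-heard as tie breakers.
 */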
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
        switch (trans->state) {
        case SCTP_ACTIVE:
                return 3;       /* best case */
        case SCTP_UNKNOWN:
                return 2;
        case SCTP_PF:
                return 1;
        default: /* case SCTP_INACTIVE */
                return 0;       /* worst case */
        }
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
                                                   struct sctp_transport *trans2)
{
        if (trans1->error_count > trans2->error_count) {
                return trans2;
        } else if (trans1->error_count == trans2->error_count &&
                   ktime_after(trans2->last_time_heard,
                               trans1->last_time_heard)) {
                return trans2;
        } else {
                return trans1;
        }
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
                                                    struct sctp_transport *best)
{
        u8 score_curr, score_best;

        if (best == NULL || curr == best)
                return curr;

        score_curr = sctp_trans_score(curr);
        score_best = sctp_trans_score(best);

        /* First, try a score-based selection if both transport states
         * differ.  If we are in a tie, make a more elaborate decision
         * based on error counts and last time heard.
         */
        if (score_curr > score_best)
                return curr;
        else if (score_curr == score_best)
                return sctp_trans_elect_tie(best, curr);
        else
                return best;
}

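/* Pick a new retransmission path by round-robining from the current
 * retran path and electing the best scored transport found.
 */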
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
        struct sctp_transport *trans = asoc->peer.retran_path;
        struct sctp_transport *trans_next = NULL;

        /* We're done as we only have the one and only path. */
        if (asoc->peer.transport_count == 1)
                return;
        /* If active_path and retran_path are the same and active,
         * then this is the only active path.  Use it.
         */
        if (asoc->peer.active_path == asoc->peer.retran_path &&
            asoc->peer.active_path->state == SCTP_ACTIVE)
                return;

        /* Iterate from retran_path's successor back to retran_path. */
        for (trans = list_next_entry(trans, transports); 1;
             trans = list_next_entry(trans, transports)) {
                /* Manually skip the head element. */
                if (&trans->transports == &asoc->peer.transport_addr_list)
                        continue;
                if (trans->state == SCTP_UNCONFIRMED)
                        continue;
                trans_next = sctp_trans_elect_best(trans, trans_next);
                /* Active is good enough, immediately return. */
                if (trans_next->state == SCTP_ACTIVE)
                        break;
                /* We've reached the end, time to update path. */
                if (trans == asoc->peer.retran_path)
                        break;
        }

        asoc->peer.retran_path = trans_next;

        pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
                 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

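/* Choose the active and retransmission paths for the association based
 * on the current state of the peer transports.
 */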
static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
        struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
        struct sctp_transport *trans_pf = NULL;

        /* Look for the two most recently used active transports. */
        list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                            transports) {
                /* Skip uninteresting transports. */
                if (trans->state == SCTP_INACTIVE ||
                    trans->state == SCTP_UNCONFIRMED)
                        continue;
                /* Keep track of the best PF transport from our list in
                 * case we don't find an active one.
                 */
                if (trans->state == SCTP_PF) {
                        trans_pf = sctp_trans_elect_best(trans, trans_pf);
                        continue;
                }
                /* For active transports, pick the most recently used. */
                if (trans_pri == NULL ||
                    ktime_after(trans->last_time_heard,
                                trans_pri->last_time_heard)) {
                        trans_sec = trans_pri;
                        trans_pri = trans;
                } else if (trans_sec == NULL ||
                           ktime_after(trans->last_time_heard,
                                       trans_sec->last_time_heard)) {
                        trans_sec = trans;
                }
        }

        /* RFC 4960 6.4: by default, an endpoint should always transmit
         * to the primary path.  So if the primary is usable (ACTIVE or
         * UNKNOWN), prefer it and demote the most recently used
         * transport to the secondary slot.
         */
        if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
             asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
            asoc->peer.primary_path != trans_pri) {
                trans_sec = trans_pri;
                trans_pri = asoc->peer.primary_path;
        }

        /* We did not find anything useful for a possible retransmission
         * path; either the primary path that we found is the same as
         * the current one, or we didn't generally find an active one.
         */
        if (trans_sec == NULL)
                trans_sec = trans_pri;

        /* If we failed to find a usable transport, just camp on the
         * active path or pick a PF transport if it's the better choice.
         */
        if (trans_pri == NULL) {
                trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
                trans_sec = trans_pri;
        }

        /* Set the active and retran transports. */
        asoc->peer.active_path = trans_pri;
        asoc->peer.retran_path = trans_sec;
}

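/* Choose the transport for sending a retransmitted packet. */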
struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
                                  struct sctp_transport *last_sent_to)
{
        /* If this is the first time a packet is sent, use the active
         * path; else use the retran path.  If the last packet was sent
         * over the retran path, update the retran path and use it.
         */
        if (last_sent_to == NULL) {
                return asoc->peer.active_path;
        } else {
                if (last_sent_to == asoc->peer.retran_path)
                        sctp_assoc_update_retran_path(asoc);

                return asoc->peer.retran_path;
        }
}

/* Update the association's fragmentation point from the path MTU and
 * the user-requested maximum fragment size.
 */
void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
        int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
                                    sctp_datachk_len(&asoc->stream));

        if (asoc->user_frag)
                frag = min_t(int, frag, asoc->user_frag);

        frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
                                sctp_datachk_len(&asoc->stream));

        asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
        if (asoc->pathmtu != pmtu) {
                asoc->pathmtu = pmtu;
                sctp_assoc_update_frag_point(asoc);
        }

        pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
                 asoc->pathmtu, asoc->frag_point);
}

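/* Update the association's pmtu and frag_point by going through all the
 * transports.  This routine is called when a transport's PMTU has changed.
 */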
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
        struct sctp_transport *t;
        __u32 pmtu = 0;

        if (!asoc)
                return;

        /* Get the lowest pmtu of all the transports. */
        list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
                if (t->pmtu_pending && t->dst) {
                        sctp_transport_update_pmtu(t,
                                                   atomic_read(&t->mtu_info));
                        t->pmtu_pending = 0;
                }
                if (!pmtu || (t->pathmtu < pmtu))
                        pmtu = t->pathmtu;
        }

        sctp_assoc_set_pmtu(asoc, pmtu);
}

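/* Should we send a SACK to update our peer?  Only answer yes once the
 * receive window has grown by at least the larger of a path MTU and a
 * configured fraction of the socket receive buffer, to avoid a flood of
 * tiny window updates.
 */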
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
        struct net *net = asoc->base.net;

        switch (asoc->state) {
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
        case SCTP_STATE_SHUTDOWN_SENT:
                if ((asoc->rwnd > asoc->a_rwnd) &&
                    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
                           (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
                           asoc->pathmtu)))
                        return true;
                break;
        default:
                break;
        }
        return false;
}

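/* Increase asoc's rwnd by len and send a window update SACK if needed. */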
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
        struct sctp_chunk *sack;
        struct timer_list *timer;

        if (asoc->rwnd_over) {
                if (asoc->rwnd_over >= len) {
                        asoc->rwnd_over -= len;
                } else {
                        asoc->rwnd += (len - asoc->rwnd_over);
                        asoc->rwnd_over = 0;
                }
        } else {
                asoc->rwnd += len;
        }

        /* If we had window pressure, start recovering it
         * once our rwnd had reached the accumulated pressure
         * threshold.  The idea is to recover slowly, but up
         * to the initial advertised window.
         */
        if (asoc->rwnd_press) {
                int change = min(asoc->pathmtu, asoc->rwnd_press);
                asoc->rwnd += change;
                asoc->rwnd_press -= change;
        }

        pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
                 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
                 asoc->a_rwnd);

        /* Send a window update SACK if the rwnd has grown enough (see
         * sctp_peer_needs_update()).  The approach is similar to the one
         * described in Section 4.2.3.3 of RFC 1122.
         */
        if (sctp_peer_needs_update(asoc)) {
                asoc->a_rwnd = asoc->rwnd;

                pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
                         "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
                         asoc->a_rwnd);

                sack = sctp_make_sack(asoc);
                if (!sack)
                        return;

                asoc->peer.sack_needed = 0;

                sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

                /* Stop the SACK timer. */
                timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
                if (del_timer(timer))
                        sctp_association_put(asoc);
        }
}

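/* Decrease asoc's rwnd by len. */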
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
        int rx_count;
        int over = 0;

        if (unlikely(!asoc->rwnd || asoc->rwnd_over))
                pr_debug("%s: association:%p has asoc->rwnd:%u, "
                         "asoc->rwnd_over:%u!\n", __func__, asoc,
                         asoc->rwnd, asoc->rwnd_over);

        if (asoc->ep->rcvbuf_policy)
                rx_count = atomic_read(&asoc->rmem_alloc);
        else
                rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

        /* If we've reached or overflowed our receive buffer, announce
         * a 0 rwnd if rwnd would still be positive.  Store the
         * potential pressure overflow so that the window can be restored
         * back to its original value.
         */
        if (rx_count >= asoc->base.sk->sk_rcvbuf)
                over = 1;

        if (asoc->rwnd >= len) {
                asoc->rwnd -= len;
                if (over) {
                        asoc->rwnd_press += asoc->rwnd;
                        asoc->rwnd = 0;
                }
        } else {
                asoc->rwnd_over += len - asoc->rwnd;
                asoc->rwnd = 0;
        }

        pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
                 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
                 asoc->rwnd_press);
}

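/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */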
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
                                     enum sctp_scope scope, gfp_t gfp)
{
        struct sock *sk = asoc->base.sk;
        int flags;

        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
        flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
        if (!inet_v6_ipv6only(sk))
                flags |= SCTP_ADDR4_ALLOWED;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
        if (asoc->peer.ipv6_address)
                flags |= SCTP_ADDR6_PEERSUPP;

        return sctp_bind_addr_copy(asoc->base.net,
                                   &asoc->base.bind_addr,
                                   &asoc->ep->base.bind_addr,
                                   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
                                         struct sctp_cookie *cookie,
                                         gfp_t gfp)
{
        int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
        int var_size3 = cookie->raw_addr_list_len;
        __u8 *raw = (__u8 *)cookie->peer_init + var_size2;

        return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
                                      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                            const union sctp_addr *laddr)
{
        int found = 0;

        if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
            sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
                                 sctp_sk(asoc->base.sk)))
                found = 1;

        return found;
}

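/* Set an association id for a given association. */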
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
        bool preload = gfpflags_allow_blocking(gfp);
        int ret;

        /* If the id is already assigned, keep it. */
        if (asoc->assoc_id)
                return 0;

        if (preload)
                idr_preload(gfp);
        spin_lock_bh(&sctp_assocs_id_lock);
        /* 0, 1, 2 are reserved as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC
         * and SCTP_ALL_ASSOC, so start the cyclic allocation above them.
         */
        ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
                               GFP_NOWAIT);
        spin_unlock_bh(&sctp_assocs_id_lock);
        if (preload)
                idr_preload_end();
        if (ret < 0)
                return ret;

        asoc->assoc_id = (sctp_assoc_t)ret;
        return 0;
}

/* Free the ASCONF queue. */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
        struct sctp_chunk *asconf;
        struct sctp_chunk *tmp;

        list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
                list_del_init(&asconf->list);
                sctp_chunk_free(asconf);
        }
}

/* Free the ASCONF-ACK cache. */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
        struct sctp_chunk *ack;
        struct sctp_chunk *tmp;

        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
                                 transmitted_list) {
                list_del_init(&ack->transmitted_list);
                sctp_chunk_free(ack);
        }
}

/* Clean up the ASCONF-ACK queue. */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
        struct sctp_chunk *ack;
        struct sctp_chunk *tmp;

        /* We can remove all the entries from the queue up to
         * the "Peer-Sequence-Number".
         */
        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
                                 transmitted_list) {
                if (ack->subh.addip_hdr->serial ==
                    htonl(asoc->peer.addip_serial))
                        break;

                list_del_init(&ack->transmitted_list);
                sctp_chunk_free(ack);
        }
}

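/* Find the ASCONF_ACK whose serial number matches @serial.  A reference
 * is taken on the returned chunk.
 */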
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
                                        const struct sctp_association *asoc,
                                        __be32 serial)
{
        struct sctp_chunk *ack;

        /* Walk through the list of cached ASCONF-ACKs and find the
         * ack chunk whose serial number matches that of the request.
         */
        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
                if (sctp_chunk_pending(ack))
                        continue;
                if (ack->subh.addip_hdr->serial == serial) {
                        sctp_chunk_hold(ack);
                        return ack;
                }
        }

        return NULL;
}

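/* Tear down the ASCONF state of an association: cached ASCONF-ACKs, the
 * queue of pending ASCONF chunks, and the last ASCONF sent.
 */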
void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
        /* Free any cached ASCONF_ACK chunk. */
        sctp_assoc_free_asconf_acks(asoc);

        /* Free the ASCONF queue. */
        sctp_assoc_free_asconf_queue(asoc);

        /* Free any cached ASCONF chunk. */
        if (asoc->addip_last_asconf)
                sctp_chunk_free(asoc->addip_last_asconf);
}