Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /* SCTP kernel implementation
0003  * (C) Copyright IBM Corp. 2001, 2004
0004  * Copyright (c) 1999-2000 Cisco, Inc.
0005  * Copyright (c) 1999-2001 Motorola, Inc.
0006  *
0007  * This file is part of the SCTP kernel implementation
0008  *
0009  * These functions handle output processing.
0010  *
0011  * Please send any bug reports or fixes you make to the
0012  * email address(es):
0013  *    lksctp developers <linux-sctp@vger.kernel.org>
0014  *
0015  * Written or modified by:
0016  *    La Monte H.P. Yarroll <piggy@acm.org>
0017  *    Karl Knutson          <karl@athena.chicago.il.us>
0018  *    Jon Grimm             <jgrimm@austin.ibm.com>
0019  *    Sridhar Samudrala     <sri@us.ibm.com>
0020  */
0021 
0022 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0023 
0024 #include <linux/types.h>
0025 #include <linux/kernel.h>
0026 #include <linux/wait.h>
0027 #include <linux/time.h>
0028 #include <linux/ip.h>
0029 #include <linux/ipv6.h>
0030 #include <linux/init.h>
0031 #include <linux/slab.h>
0032 #include <net/inet_ecn.h>
0033 #include <net/ip.h>
0034 #include <net/icmp.h>
0035 #include <net/net_namespace.h>
0036 
0037 #include <linux/socket.h> /* for sa_family_t */
0038 #include <net/sock.h>
0039 
0040 #include <net/sctp/sctp.h>
0041 #include <net/sctp/sm.h>
0042 #include <net/sctp/checksum.h>
0043 
0044 /* Forward declarations for private helpers. */
0045 static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
0046                          struct sctp_chunk *chunk);
0047 static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
0048                           struct sctp_chunk *chunk);
0049 static void sctp_packet_append_data(struct sctp_packet *packet,
0050                     struct sctp_chunk *chunk);
0051 static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
0052                        struct sctp_chunk *chunk,
0053                        u16 chunk_len);
0054 
0055 static void sctp_packet_reset(struct sctp_packet *packet)
0056 {
0057     /* sctp_packet_transmit() relies on this to reset size to the
0058      * current overhead after sending packets.
0059      */
0060     packet->size = packet->overhead;
0061 
0062     packet->has_cookie_echo = 0;
0063     packet->has_sack = 0;
0064     packet->has_data = 0;
0065     packet->has_auth = 0;
0066     packet->ipfragok = 0;
0067     packet->auth = NULL;
0068 }
0069 
/* Config a packet.
 * This appears to be a followup set of initializations: stamp the packet
 * with the verification tag and, only on the first call of a flush
 * schedule (packet still empty), refresh overhead, path-MTU state and the
 * GSO size limit, optionally prepending an ECNE chunk.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	/* Without an association sp stays NULL and sctp_mtu_payload()
	 * works from defaults.
	 */
	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pl_enabled(tp) &&
		   asoc->param_flags & SPP_PMTUD_ENABLE) {
		if (!sctp_transport_pmtu_check(tp))
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
					    GSO_LEGACY_MAX_SIZE)
					  : asoc->pathmtu;
	rcu_read_unlock();
}
0142 
0143 /* Initialize the packet structure. */
0144 void sctp_packet_init(struct sctp_packet *packet,
0145               struct sctp_transport *transport,
0146               __u16 sport, __u16 dport)
0147 {
0148     pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
0149 
0150     packet->transport = transport;
0151     packet->source_port = sport;
0152     packet->destination_port = dport;
0153     INIT_LIST_HEAD(&packet->chunk_list);
0154     /* The overhead will be calculated by sctp_packet_config() */
0155     packet->overhead = 0;
0156     sctp_packet_reset(packet);
0157     packet->vtag = 0;
0158 }
0159 
0160 /* Free a packet.  */
0161 void sctp_packet_free(struct sctp_packet *packet)
0162 {
0163     struct sctp_chunk *chunk, *tmp;
0164 
0165     pr_debug("%s: packet:%p\n", __func__, packet);
0166 
0167     list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
0168         list_del_init(&chunk->list);
0169         sctp_chunk_free(chunk);
0170     }
0171 }
0172 
0173 /* This routine tries to append the chunk to the offered packet. If adding
0174  * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
0175  * is not present in the packet, it transmits the input packet.
0176  * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
0177  * as it can fit in the packet, but any more data that does not fit in this
0178  * packet can be sent only after receiving the COOKIE_ACK.
0179  */
0180 enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
0181                       struct sctp_chunk *chunk,
0182                       int one_packet, gfp_t gfp)
0183 {
0184     enum sctp_xmit retval;
0185 
0186     pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
0187          packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
0188 
0189     switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
0190     case SCTP_XMIT_PMTU_FULL:
0191         if (!packet->has_cookie_echo) {
0192             int error = 0;
0193 
0194             error = sctp_packet_transmit(packet, gfp);
0195             if (error < 0)
0196                 chunk->skb->sk->sk_err = -error;
0197 
0198             /* If we have an empty packet, then we can NOT ever
0199              * return PMTU_FULL.
0200              */
0201             if (!one_packet)
0202                 retval = sctp_packet_append_chunk(packet,
0203                                   chunk);
0204         }
0205         break;
0206 
0207     case SCTP_XMIT_RWND_FULL:
0208     case SCTP_XMIT_OK:
0209     case SCTP_XMIT_DELAY:
0210         break;
0211     }
0212 
0213     return retval;
0214 }
0215 
/* Try to bundle a pad chunk into a packet with a heartbeat chunk for a
 * PLPMTUD probe, so the packet on the wire reaches the probed size.
 */
static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
{
	struct sctp_transport *t = pkt->transport;
	struct sctp_chunk *pad;
	int overhead = 0;

	/* Only PMTU-probe chunks need padding. */
	if (!chunk->pmtu_probe)
		return SCTP_XMIT_OK;

	/* calculate the Padding Data size for the pad chunk: probe size
	 * minus the SCTP header, the chunk headers and the heartbeat info
	 * already accounted for.
	 */
	overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
	pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
	if (!pad)
		return SCTP_XMIT_DELAY;

	list_add_tail(&pad->list, &pkt->chunk_list);
	pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
	chunk->transport = t;

	return SCTP_XMIT_OK;
}
0239 
/* Try to bundle an auth chunk into the packet.  Returns SCTP_XMIT_OK both
 * when an AUTH chunk was bundled and when none was needed.
 */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	/* Keep a hold on the shared key; the hmac is computed later,
	 * in sctp_packet_pack().
	 */
	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	/* If the AUTH chunk itself didn't fit, drop it again. */
	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
0278 
/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			/* Skip bundling when this transport's SACK
			 * generation doesn't match the association's
			 * current peer generation.
			 */
			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			/* Advertise the current receive window. */
			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				SCTP_INC_STATS(asoc->base.net,
					       SCTP_MIB_OUTCTRLCHUNKS);
				asoc->stats.octrlchunks++;
				asoc->peer.sack_needed = 0;
				/* SACK is on its way; stop the timer and
				 * drop the reference it held.
				 */
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
0323 
0324 
/* Append a chunk to the offered packet reporting back any inability to do
 * so.  This is the low-level append: the caller has already decided that
 * bundling (AUTH/SACK) was handled or is not wanted.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	/* On-wire chunks are padded to a 4-byte boundary. */
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk.  */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}
0379 
/* Append a chunk to the offered packet reporting back any inability to do
 * so.  This is the high-level entry: it first checks DATA admission rules
 * and opportunistically bundles AUTH, SACK and (for PMTU probes) PAD
 * chunks around the chunk itself.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Pad the packet up to the probe size if this is a PMTU probe. */
	retval = sctp_packet_bundle_pad(packet, chunk);

finish:
	return retval;
}
0419 
0420 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
0421 {
0422     if (SCTP_OUTPUT_CB(head)->last == head)
0423         skb_shinfo(head)->frag_list = skb;
0424     else
0425         SCTP_OUTPUT_CB(head)->last->next = skb;
0426     SCTP_OUTPUT_CB(head)->last = skb;
0427 
0428     head->truesize += skb->truesize;
0429     head->data_len += skb->len;
0430     head->len += skb->len;
0431     refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
0432 
0433     __skb_header_release(skb);
0434 }
0435 
/* Pack the chunks queued on @packet into @head.  Without GSO all chunks go
 * straight into @head; with GSO each wire-sized segment is built in its
 * own skb and chained onto @head via sctp_packet_gso_append().  AUTH hmacs
 * are computed per segment and the SCTP checksum is either computed here
 * or set up for hardware offload.
 *
 * Returns the number of wire packets built, or 0 on allocation failure or
 * when an AUTH bundle cannot fit within the path MTU.
 */
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		/* Non-GSO: everything fits in head, no per-segment alloc. */
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb: walk the queued
		 * chunks until the next one would exceed the path MTU.
		 */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				/* AUTH plus this chunk can never fit in one
				 * segment — give up on the whole packet.
				 */
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list;
		 * pkt_size now counts payload bytes only.
		 */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				/* Start an RTT measurement on this chunk if
				 * none is in progress on the transport.
				 */
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			/* Zero-fill up to the 4-byte chunk boundary. */
			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			/* Remember where the AUTH chunk lands so its hmac
			 * can be filled in after the segment is complete.
			 */
			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
							skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			/* Control chunks (except AUTH, still needed for the
			 * hmac below) are done with; DATA chunks are not
			 * freed here.
			 */
			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		/* GSO always uses checksum offload. */
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	/* Compute the CRC in software when the device can't, or when
	 * xfrm/fragmentation/UDP encapsulation gets in the way.
	 */
	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		/* Leave the CRC to the device. */
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
0563 
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* A packet larger than the pmtu needs GSO or IP fragmentation,
	 * except for a PLPMTUD probe which is sent oversized on purpose.
	 */
	if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
		if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */
			packet->ipfragok = 1;
		} else {
			if (!sk_can_gso(sk)) { /* check gso */
				pr_err_once("Trying to GSO but underlying device doesn't support it.");
				goto out;
			}
			gso = 1;
		}
	}

	/* alloc head skb; with GSO only the headers live in head and the
	 * chunk data goes into frag_list segments.
	 */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	if (!tp->dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		/* Only hold a new association ref if the timer was not
		 * already pending.
		 */
		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	/* Only control chunks are freed here; DATA chunks are left alive
	 * (presumably still referenced by the outqueue for retransmission
	 * — they are deliberately not freed on this path).
	 */
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
0666 
0667 /********************************************************************
0668  * 2nd Level Abstractions
0669  ********************************************************************/
0670 
/* This private function checks whether a DATA chunk may be added to the
 * packet right now.  Returns SCTP_XMIT_OK to append, SCTP_XMIT_RWND_FULL
 * when rwnd/cwnd forbids sending, or SCTP_XMIT_DELAY to hold the chunk
 * back (Nagle) in hopes of bundling a fuller packet later.
 */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
0754 
0755 /* This private function does management things when adding DATA chunk */
0756 static void sctp_packet_append_data(struct sctp_packet *packet,
0757                 struct sctp_chunk *chunk)
0758 {
0759     struct sctp_transport *transport = packet->transport;
0760     size_t datasize = sctp_data_size(chunk);
0761     struct sctp_association *asoc = transport->asoc;
0762     u32 rwnd = asoc->peer.rwnd;
0763 
0764     /* Keep track of how many bytes are in flight over this transport. */
0765     transport->flight_size += datasize;
0766 
0767     /* Keep track of how many bytes are in flight to the receiver. */
0768     asoc->outqueue.outstanding_bytes += datasize;
0769 
0770     /* Update our view of the receiver's rwnd. */
0771     if (datasize < rwnd)
0772         rwnd -= datasize;
0773     else
0774         rwnd = 0;
0775 
0776     asoc->peer.rwnd = rwnd;
0777     sctp_chunk_assign_tsn(chunk);
0778     asoc->stream.si->assign_number(chunk);
0779 }
0780 
/* Decide whether a chunk of @chunk_len (already padded) bytes fits into
 * @packet.  Returns SCTP_XMIT_OK when it fits (possibly by allowing IP
 * fragmentation via packet->ipfragok), or SCTP_XMIT_PMTU_FULL when the
 * packet must be flushed first.
 */
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	/* Prefer the association-wide pathmtu when there is one. */
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *  1. The packet is empty (meaning this chunk is greater
		 *     the MTU)
		 *  2. The packet doesn't have any data in it yet and data
		 *     requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}