0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  net/dccp/output.c
0004  *
0005  *  An implementation of the DCCP protocol
0006  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
0007  */
0008 
0009 #include <linux/dccp.h>
0010 #include <linux/kernel.h>
0011 #include <linux/skbuff.h>
0012 #include <linux/slab.h>
0013 #include <linux/sched/signal.h>
0014 
0015 #include <net/inet_sock.h>
0016 #include <net/sock.h>
0017 
0018 #include "ackvec.h"
0019 #include "ccid.h"
0020 #include "dccp.h"
0021 
/*
 * An Ack has just been transmitted on @sk, so any pending delayed-Ack
 * timer is now redundant and can be cleared.
 */
static inline void dccp_event_ack_sent(struct sock *sk)
{
    inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
0026 
0027 /* enqueue @skb on sk_send_head for retransmission, return clone to send now */
0028 static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
0029 {
0030     skb_set_owner_w(skb, sk);
0031     WARN_ON(sk->sk_send_head);
0032     sk->sk_send_head = skb;
0033     return skb_clone(sk->sk_send_head, gfp_any());
0034 }
0035 
/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
    if (likely(skb != NULL)) {
        struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        struct dccp_hdr *dh;
        /* XXX For now we're using only 48 bits sequence numbers */
        const u32 dccp_header_size = sizeof(*dh) +
                         sizeof(struct dccp_hdr_ext) +
                      dccp_packet_hdr_len(dcb->dccpd_type);
        int err, set_ack = 1;
        u64 ackno = dp->dccps_gsr;
        /*
         * Increment GSS here already in case the option code needs it.
         * Update GSS for real only if option processing below succeeds.
         */
        dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

        /* Per-type fixups: whether to set the Ack field, which ackno,
         * and whether the skb still needs an owner. */
        switch (dcb->dccpd_type) {
        case DCCP_PKT_DATA:
            set_ack = 0;    /* plain Data carries no Acknowledgement field */
            fallthrough;
        case DCCP_PKT_DATAACK:
        case DCCP_PKT_RESET:
            break;

        case DCCP_PKT_REQUEST:
            set_ack = 0;
            /* Use ISS on the first (non-retransmitted) Request. */
            if (icsk->icsk_retransmits == 0)
                dcb->dccpd_seq = dp->dccps_iss;
            fallthrough;

        case DCCP_PKT_SYNC:
        case DCCP_PKT_SYNCACK:
            /* Sync/SyncAck echo the caller-supplied ackno, not GSR. */
            ackno = dcb->dccpd_ack_seq;
            fallthrough;
        default:
            /*
             * Set owner/destructor: some skbs are allocated via
             * alloc_skb (e.g. when retransmission may happen).
             * Only Data, DataAck, and Reset packets should come
             * through here with skb->sk set.
             */
            WARN_ON(skb->sk);
            skb_set_owner_w(skb, sk);
            break;
        }

        /* Append DCCP options; on failure the skb is consumed here. */
        if (dccp_insert_options(sk, skb)) {
            kfree_skb(skb);
            return -EPROTO;
        }


        /* Build DCCP header and checksum it. */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);
        dh->dccph_type  = dcb->dccpd_type;
        dh->dccph_sport = inet->inet_sport;
        dh->dccph_dport = inet->inet_dport;
        dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
        dh->dccph_ccval = dcb->dccpd_ccval;
        dh->dccph_cscov = dp->dccps_pcslen;
        /* XXX For now we're using only 48 bits sequence numbers */
        dh->dccph_x = 1;

        /* Option insertion succeeded: commit the new GSS for real. */
        dccp_update_gss(sk, dcb->dccpd_seq);
        dccp_hdr_set_seq(dh, dp->dccps_gss);
        if (set_ack)
            dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

        switch (dcb->dccpd_type) {
        case DCCP_PKT_REQUEST:
            dccp_hdr_request(skb)->dccph_req_service =
                            dp->dccps_service;
            /*
             * Limit Ack window to ISS <= P.ackno <= GSS, so that
             * only Responses to Requests we sent are considered.
             */
            dp->dccps_awl = dp->dccps_iss;
            break;
        case DCCP_PKT_RESET:
            dccp_hdr_reset(skb)->dccph_reset_code =
                            dcb->dccpd_reset_code;
            break;
        }

        /* Address-family specific checksum (IPv4/IPv6). */
        icsk->icsk_af_ops->send_check(sk, skb);

        if (set_ack)
            dccp_event_ack_sent(sk);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

        /* Hand the finished packet down to the network layer. */
        err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
        return net_xmit_eval(err);
    }
    return -ENOBUFS;    /* caller passed no skb */
}
0143 
0144 /**
0145  * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
0146  * @dp: socket to find packet size limits of
0147  *
0148  * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
0149  * since the RX CCID is restricted to feedback packets (Acks), which are small
0150  * in comparison with the data traffic. A value of 0 means "no current CCMPS".
0151  */
0152 static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
0153 {
0154     const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;
0155 
0156     if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
0157         return 0;
0158     return tx_ccid->ccid_ops->ccid_ccmps;
0159 }
0160 
0161 unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
0162 {
0163     struct inet_connection_sock *icsk = inet_csk(sk);
0164     struct dccp_sock *dp = dccp_sk(sk);
0165     u32 ccmps = dccp_determine_ccmps(dp);
0166     u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;
0167 
0168     /* Account for header lengths and IPv4/v6 option overhead */
0169     cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
0170             sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));
0171 
0172     /*
0173      * Leave enough headroom for common DCCP header options.
0174      * This only considers options which may appear on DCCP-Data packets, as
0175      * per table 3 in RFC 4340, 5.8. When running out of space for other
0176      * options (eg. Ack Vector which can take up to 255 bytes), it is better
0177      * to schedule a separate Ack. Thus we leave headroom for the following:
0178      *  - 1 byte for Slow Receiver (11.6)
0179      *  - 6 bytes for Timestamp (13.1)
0180      *  - 10 bytes for Timestamp Echo (13.3)
0181      *  - 8 bytes for NDP count (7.7, when activated)
0182      *  - 6 bytes for Data Checksum (9.3)
0183      *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
0184      */
0185     cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
0186                (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
0187 
0188     /* And store cached results */
0189     icsk->icsk_pmtu_cookie = pmtu;
0190     dp->dccps_mss_cache = cur_mps;
0191 
0192     return cur_mps;
0193 }
0194 
0195 EXPORT_SYMBOL_GPL(dccp_sync_mss);
0196 
/*
 * Wake up tasks and async handlers waiting for send-buffer space on @sk.
 * Installed as the socket's write-space callback.
 */
void dccp_write_space(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);    /* sk_wq is RCU-protected */
    if (skwq_has_sleeper(wq))
        wake_up_interruptible(&wq->wait);
    /* Should agree with poll, otherwise some programs break */
    if (sock_writeable(sk))
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

    rcu_read_unlock();
}
0211 
/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 * Returns the remaining timeout in jiffies, or -1 when interrupted by a
 * signal or when a socket error is pending.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
    DEFINE_WAIT(wait);
    long remaining;

    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
    sk->sk_write_pending++;
    release_sock(sk);    /* drop the socket lock while sleeping */

    remaining = schedule_timeout(delay);

    lock_sock(sk);       /* re-acquire before touching socket state */
    sk->sk_write_pending--;
    finish_wait(sk_sleep(sk), &wait);

    if (signal_pending(current) || sk->sk_err)
        return -1;
    return remaining;
}
0238 
/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * @sk: socket to send data packet on
 *
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
    int err, len;
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb = dccp_qpolicy_pop(sk);

    if (unlikely(skb == NULL))
        return;
    len = skb->len;    /* remember payload length for CCID accounting below */

    if (sk->sk_state == DCCP_PARTOPEN) {
        const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
        /*
         * See 8.1.5 - Handshake Completion.
         *
         * For robustness we resend Confirm options until the client has
         * entered OPEN. During the initial feature negotiation, the MPS
         * is smaller than usual, reduced by the Change/Confirm options.
         */
        if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
            /* Payload + pending featneg options would not fit: send
             * the options on a separate Ack and drop the backlog. */
            DCCP_WARN("Payload too large (%d) for featneg.\n", len);
            dccp_send_ack(sk);
            dccp_feat_list_purge(&dp->dccps_featneg);
        }

        /* Keep acking in PARTOPEN until the handshake completes. */
        inet_csk_schedule_ack(sk);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                          inet_csk(sk)->icsk_rto,
                          DCCP_RTO_MAX);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
    } else if (dccp_ack_pending(sk)) {
        /* Piggyback the pending Ack onto this data packet. */
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
    } else {
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
    }

    err = dccp_transmit_skb(sk, skb);
    if (err)
        dccp_pr_debug("transmit_skb() returned err=%d\n", err);
    /*
     * Register this one as sent even if an error occurred. To the remote
     * end a local packet drop is indistinguishable from network loss, i.e.
     * any local drop will eventually be reported via receiver feedback.
     */
    ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

    /*
     * If the CCID needs to transfer additional header options out-of-band
     * (e.g. Ack Vectors or feature-negotiation options), it activates this
     * flag to schedule a Sync. The Sync will automatically incorporate all
     * currently pending header options, thus clearing the backlog.
     */
    if (dp->dccps_sync_scheduled)
        dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}
0300 
/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk: socket to be drained
 * @time_budget: time allowed to drain the queue
 *
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;
    long delay, rc;

    while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
        /* Ask the TX CCID whether this packet may be sent now. */
        rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

        switch (ccid_packet_dequeue_eval(rc)) {
        case CCID_PACKET_WILL_DEQUEUE_LATER:
            /*
             * If the CCID determines when to send, the next sending
             * time is unknown or the CCID may not even send again
             * (e.g. remote host crashes or lost Ack packets).
             */
            DCCP_WARN("CCID did not manage to send all packets\n");
            return;
        case CCID_PACKET_DELAY:
            /* NOTE(review): rc is treated as a delay in milliseconds
             * here — this relies on the CCID return contract. */
            delay = msecs_to_jiffies(rc);
            if (delay > *time_budget)
                return;    /* cannot afford to wait that long */
            rc = dccp_wait_for_ccid(sk, delay);
            if (rc < 0)
                return;    /* signal or socket error while waiting */
            /* charge only the time actually slept */
            *time_budget -= (delay - rc);
            /* check again if we can send now */
            break;
        case CCID_PACKET_SEND_AT_ONCE:
            dccp_xmit_packet(sk);
            break;
        case CCID_PACKET_ERR:
            /* Drop this packet and carry on with the next one. */
            skb_dequeue(&sk->sk_write_queue);
            kfree_skb(skb);
            dccp_pr_debug("packet discarded due to err=%ld\n", rc);
        }
    }
}
0349 
/*
 * Drain the TX queue, transmitting packets as the CCID permits.  Stops when
 * the queue is empty, when the CCID defers dequeueing, or when a delay timer
 * has been armed for a later retry.
 */
void dccp_write_xmit(struct sock *sk)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;

    while ((skb = dccp_qpolicy_top(sk))) {
        int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

        switch (ccid_packet_dequeue_eval(rc)) {
        case CCID_PACKET_WILL_DEQUEUE_LATER:
            /* CCID will trigger the next send itself. */
            return;
        case CCID_PACKET_DELAY:
            /* NOTE(review): rc appears to be a delay in milliseconds
             * per the CCID return contract — retry when it expires. */
            sk_reset_timer(sk, &dp->dccps_xmit_timer,
                       jiffies + msecs_to_jiffies(rc));
            return;
        case CCID_PACKET_SEND_AT_ONCE:
            dccp_xmit_packet(sk);
            break;
        case CCID_PACKET_ERR:
            /* Drop the offending packet and continue with the next. */
            dccp_qpolicy_drop(sk, skb);
            dccp_pr_debug("packet discarded due to err=%d\n", rc);
        }
    }
}
0374 
0375 /**
0376  * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
0377  * @sk: socket to perform retransmit on
0378  *
0379  * There are only four retransmittable packet types in DCCP:
0380  * - Request  in client-REQUEST  state (sec. 8.1.1),
0381  * - CloseReq in server-CLOSEREQ state (sec. 8.3),
0382  * - Close    in   node-CLOSING  state (sec. 8.3),
0383  * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
0384  * This function expects sk->sk_send_head to contain the original skb.
0385  */
0386 int dccp_retransmit_skb(struct sock *sk)
0387 {
0388     WARN_ON(sk->sk_send_head == NULL);
0389 
0390     if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
0391         return -EHOSTUNREACH; /* Routing failure or similar. */
0392 
0393     /* this count is used to distinguish original and retransmitted skb */
0394     inet_csk(sk)->icsk_retransmits++;
0395 
0396     return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
0397 }
0398 
/*
 * Build a DCCP-Response packet answering the Request in @req, to be sent via
 * @dst.  Returns the finished skb, or NULL on allocation/option failure.
 * On success, inet_rsk(req)->acked is set to record that a Response went out.
 */
struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                   struct request_sock *req)
{
    struct dccp_hdr *dh;
    struct dccp_request_sock *dreq;
    const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                     sizeof(struct dccp_hdr_ext) +
                     sizeof(struct dccp_hdr_response);
    struct sk_buff *skb;

    /* sk is marked const to clearly express we dont hold socket lock.
     * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
     * it is safe to promote sk to non const.
     */
    skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
               GFP_ATOMIC);
    if (!skb)
        return NULL;

    skb_reserve(skb, MAX_DCCP_HEADER);

    skb_dst_set(skb, dst_clone(dst));

    dreq = dccp_rsk(req);
    if (inet_rsk(req)->acked)   /* increase GSS upon retransmission */
        dccp_inc_seqno(&dreq->dreq_gss);
    DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
    DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

    /* Resolve feature dependencies resulting from choice of CCID */
    if (dccp_feat_server_ccid_dependencies(dreq))
        goto response_failed;

    if (dccp_insert_options_rsk(dreq, skb))
        goto response_failed;

    /* Build and checksum header */
    dh = dccp_zeroed_hdr(skb, dccp_header_size);

    dh->dccph_sport = htons(inet_rsk(req)->ir_num);
    dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
    dh->dccph_doff  = (dccp_header_size +
               DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
    dh->dccph_type  = DCCP_PKT_RESPONSE;
    dh->dccph_x = 1;    /* 48-bit (extended) sequence numbers */
    dccp_hdr_set_seq(dh, dreq->dreq_gss);
    dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
    dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

    dccp_csum_outgoing(skb);

    /* We use `acked' to remember that a Response was already sent. */
    inet_rsk(req)->acked = 1;
    DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
    return skb;
response_failed:
    kfree_skb(skb);
    return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);
0460 
/*
 * Answer the offending packet in @rcv_skb with a Reset built on behalf of
 * control socket @sk (used when no connection socket exists for the packet).
 * Returns the Reset skb, or NULL on allocation failure.
 */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
    struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
    struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
    const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                       sizeof(struct dccp_hdr_ext) +
                       sizeof(struct dccp_hdr_reset);
    struct dccp_hdr_reset *dhr;
    struct sk_buff *skb;

    skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
    if (skb == NULL)
        return NULL;

    skb_reserve(skb, sk->sk_prot->max_header);

    /* Swap the send and the receive. */
    dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
    dh->dccph_type  = DCCP_PKT_RESET;
    dh->dccph_sport = rxdh->dccph_dport;
    dh->dccph_dport = rxdh->dccph_sport;
    dh->dccph_doff  = dccp_hdr_reset_len / 4;
    dh->dccph_x = 1;    /* 48-bit (extended) sequence numbers */

    dhr = dccp_hdr_reset(skb);
    dhr->dccph_reset_code = dcb->dccpd_reset_code;

    /* Some reset codes carry diagnostic data about the offending packet. */
    switch (dcb->dccpd_reset_code) {
    case DCCP_RESET_CODE_PACKET_ERROR:
        dhr->dccph_reset_data[0] = rxdh->dccph_type;
        break;
    case DCCP_RESET_CODE_OPTION_ERROR:
    case DCCP_RESET_CODE_MANDATORY_ERROR:
        memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
        break;
    }
    /*
     * From RFC 4340, 8.3.1:
     *   If P.ackno exists, set R.seqno := P.ackno + 1.
     *   Else set R.seqno := 0.
     */
    if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
        dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
    dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

    dccp_csum_outgoing(skb);
    return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);
0512 
0513 /* send Reset on established socket, to close or abort the connection */
0514 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
0515 {
0516     struct sk_buff *skb;
0517     /*
0518      * FIXME: what if rebuild_header fails?
0519      * Should we be doing a rebuild_header here?
0520      */
0521     int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);
0522 
0523     if (err != 0)
0524         return err;
0525 
0526     skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
0527     if (skb == NULL)
0528         return -ENOBUFS;
0529 
0530     /* Reserve space for headers and prepare control bits. */
0531     skb_reserve(skb, sk->sk_prot->max_header);
0532     DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
0533     DCCP_SKB_CB(skb)->dccpd_reset_code = code;
0534 
0535     return dccp_transmit_skb(sk, skb);
0536 }
0537 
/*
 * Do all connect socket setups that can be done AF independent.
 * Sends the initial DCCP-Request and arms the retransmission timer.
 * Returns 0 on success or a negative errno.
 */
int dccp_connect(struct sock *sk)
{
    struct sk_buff *skb;
    struct dccp_sock *dp = dccp_sk(sk);
    struct dst_entry *dst = __sk_dst_get(sk);
    struct inet_connection_sock *icsk = inet_csk(sk);

    sk->sk_err = 0;
    sock_reset_flag(sk, SOCK_DONE);

    dccp_sync_mss(sk, dst_mtu(dst));

    /* do not connect if feature negotiation setup fails */
    if (dccp_feat_finalise_settings(dccp_sk(sk)))
        return -EPROTO;

    /* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
    dp->dccps_gar = dp->dccps_iss;

    skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
    if (unlikely(skb == NULL))
        return -ENOBUFS;

    /* Reserve space for headers. */
    skb_reserve(skb, sk->sk_prot->max_header);

    DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

    /* Entail keeps the original on sk_send_head; a clone is sent now. */
    dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
    DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

    /* Timer for repeating the REQUEST until an answer. */
    icsk->icsk_retransmits = 0;
    inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                  icsk->icsk_rto, DCCP_RTO_MAX);
    return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);
0580 
0581 void dccp_send_ack(struct sock *sk)
0582 {
0583     /* If we have been reset, we may not send again. */
0584     if (sk->sk_state != DCCP_CLOSED) {
0585         struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
0586                         GFP_ATOMIC);
0587 
0588         if (skb == NULL) {
0589             inet_csk_schedule_ack(sk);
0590             inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
0591             inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
0592                           TCP_DELACK_MAX,
0593                           DCCP_RTO_MAX);
0594             return;
0595         }
0596 
0597         /* Reserve space for headers */
0598         skb_reserve(skb, sk->sk_prot->max_header);
0599         DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
0600         dccp_transmit_skb(sk, skb);
0601     }
0602 }
0603 
0604 EXPORT_SYMBOL_GPL(dccp_send_ack);
0605 
#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
/* NOTE(review): dead code, compiled out via #if 0 — kept for reference only. */
void dccp_send_delayed_ack(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    /*
     * FIXME: tune this timer. elapsed time fixes the skew, so no problem
     * with using 2s, and active senders also piggyback the ACK into a
     * DATAACK packet, so this is really for quiescent senders.
     */
    unsigned long timeout = jiffies + 2 * HZ;

    /* Use new timeout only if there wasn't a older one earlier. */
    if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
        /* If delack timer was blocked or is about to expire,
         * send ACK now.
         *
         * FIXME: check the "about to expire" part
         */
        if (icsk->icsk_ack.blocked) {
            dccp_send_ack(sk);
            return;
        }

        if (!time_before(timeout, icsk->icsk_ack.timeout))
            timeout = icsk->icsk_ack.timeout;
    }
    icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
    icsk->icsk_ack.timeout = timeout;
    sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif
0638 
/*
 * Send a Sync or SyncAck (@pkt_type) acknowledging sequence number @ackno.
 * Allocation failure is only logged; the Sync is silently lost in that case.
 */
void dccp_send_sync(struct sock *sk, const u64 ackno,
            const enum dccp_pkt_type pkt_type)
{
    /*
     * We are not putting this on the write queue, so
     * dccp_transmit_skb() will set the ownership to this
     * sock.
     */
    struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

    if (skb == NULL) {
        /* FIXME: how to make sure the sync is sent? */
        DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
        return;
    }

    /* Reserve space for headers and prepare control bits. */
    skb_reserve(skb, sk->sk_prot->max_header);
    DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
    DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

    /*
     * Clear the flag in case the Sync was scheduled for out-of-band data,
     * such as carrying a long Ack Vector.
     */
    dccp_sk(sk)->dccps_sync_scheduled = 0;

    dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);
0670 
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 *
 * @active selects blocking allocation (GFP_KERNEL) and enables
 * retransmission of the Close/CloseReq via sk_send_head.
 */
void dccp_send_close(struct sock *sk, const int active)
{
    struct dccp_sock *dp = dccp_sk(sk);
    struct sk_buff *skb;
    const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

    skb = alloc_skb(sk->sk_prot->max_header, prio);
    if (skb == NULL)
        return;

    /* Reserve space for headers and prepare control bits. */
    skb_reserve(skb, sk->sk_prot->max_header);
    /* A server not in timewait asks the client to close (CloseReq). */
    if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
    else
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

    if (active) {
        /* Keep the original queued for retransmission; send a clone. */
        skb = dccp_skb_entail(sk, skb);
        /*
         * Retransmission timer for active-close: RFC 4340, 8.3 requires
         * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
         * state can be left. The initial timeout is 2 RTTs.
         * Since RTT measurement is done by the CCIDs, there is no easy
         * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
         * is too low (200ms); we use a high value to avoid unnecessary
         * retransmissions when the link RTT is > 0.2 seconds.
         * FIXME: Let main module sample RTTs and use that instead.
         */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                      DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
    }
    dccp_transmit_skb(sk, skb);
}