Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  net/dccp/timer.c
0004  *
0005  *  An implementation of the DCCP protocol
0006  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
0007  */
0008 
0009 #include <linux/dccp.h>
0010 #include <linux/skbuff.h>
0011 #include <linux/export.h>
0012 
0013 #include "dccp.h"
0014 
0015 /* sysctl variables governing numbers of retransmission attempts */
/* Max retransmissions while connecting (DCCP_REQUESTING/DCCP_PARTOPEN);
 * reuses TCP's SYN-retry default. Overridden per-socket by icsk_syn_retries. */
int  sysctl_dccp_request_retries    __read_mostly = TCP_SYN_RETRIES;
/* Retransmissions after which the cached route is re-validated
 * (dst_negative_advice() in dccp_write_timeout()). */
int  sysctl_dccp_retries1       __read_mostly = TCP_RETR1;
/* Hard retransmission limit on an established connection; exceeding it
 * aborts the connection via dccp_write_err(). */
int  sysctl_dccp_retries2       __read_mostly = TCP_RETR2;
0019 
/*
 * dccp_write_err  -  Abort the connection after too many write timeouts.
 *
 * Promotes any previously collected soft error (defaulting to ETIMEDOUT)
 * to a hard error and reports it to the socket user, sends RESET(Aborted)
 * to the peer, marks the socket done and bumps the abort-on-timeout MIB
 * counter.  Called with the socket locked (via dccp_write_timer()).
 */
static void dccp_write_err(struct sock *sk)
{
    /* "?:" without middle operand: keep sk_err_soft if non-zero. */
    sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
    sk_error_report(sk);

    dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
    dccp_done(sk);
    __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
}
0029 
0030 /* A write timeout has occurred. Process the after effects. */
0031 static int dccp_write_timeout(struct sock *sk)
0032 {
0033     const struct inet_connection_sock *icsk = inet_csk(sk);
0034     int retry_until;
0035 
0036     if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
0037         if (icsk->icsk_retransmits != 0)
0038             dst_negative_advice(sk);
0039         retry_until = icsk->icsk_syn_retries ?
0040                 : sysctl_dccp_request_retries;
0041     } else {
0042         if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
0043             /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu
0044                black hole detection. :-(
0045 
0046                It is place to make it. It is not made. I do not want
0047                to make it. It is disguisting. It does not work in any
0048                case. Let me to cite the same draft, which requires for
0049                us to implement this:
0050 
0051    "The one security concern raised by this memo is that ICMP black holes
0052    are often caused by over-zealous security administrators who block
0053    all ICMP messages.  It is vitally important that those who design and
0054    deploy security systems understand the impact of strict filtering on
0055    upper-layer protocols.  The safest web site in the world is worthless
0056    if most TCP implementations cannot transfer data from it.  It would
0057    be far nicer to have all of the black holes fixed rather than fixing
0058    all of the TCP implementations."
0059 
0060                Golden words :-).
0061            */
0062 
0063             dst_negative_advice(sk);
0064         }
0065 
0066         retry_until = sysctl_dccp_retries2;
0067         /*
0068          * FIXME: see tcp_write_timout and tcp_out_of_resources
0069          */
0070     }
0071 
0072     if (icsk->icsk_retransmits >= retry_until) {
0073         /* Has it gone just too far? */
0074         dccp_write_err(sk);
0075         return 1;
0076     }
0077     return 0;
0078 }
0079 
0080 /*
0081  *  The DCCP retransmit timer.
0082  */
0083 static void dccp_retransmit_timer(struct sock *sk)
0084 {
0085     struct inet_connection_sock *icsk = inet_csk(sk);
0086 
0087     /*
0088      * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
0089      * sent, no need to retransmit, this sock is dead.
0090      */
0091     if (dccp_write_timeout(sk))
0092         return;
0093 
0094     /*
0095      * We want to know the number of packets retransmitted, not the
0096      * total number of retransmissions of clones of original packets.
0097      */
0098     if (icsk->icsk_retransmits == 0)
0099         __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);
0100 
0101     if (dccp_retransmit_skb(sk) != 0) {
0102         /*
0103          * Retransmission failed because of local congestion,
0104          * do not backoff.
0105          */
0106         if (--icsk->icsk_retransmits == 0)
0107             icsk->icsk_retransmits = 1;
0108         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
0109                       min(icsk->icsk_rto,
0110                           TCP_RESOURCE_PROBE_INTERVAL),
0111                       DCCP_RTO_MAX);
0112         return;
0113     }
0114 
0115     icsk->icsk_backoff++;
0116 
0117     icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
0118     inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
0119                   DCCP_RTO_MAX);
0120     if (icsk->icsk_retransmits > sysctl_dccp_retries1)
0121         __sk_dst_reset(sk);
0122 }
0123 
/*
 * dccp_write_timer  -  Retransmit-timer callback.
 *
 * Resolves the owning socket from the timer, takes the BH socket lock,
 * and dispatches the pending icsk event (currently only ICSK_TIME_RETRANS)
 * to dccp_retransmit_timer().  If the socket is locked by a process, or
 * the timer fired before its deadline, the timer is simply rearmed.
 */
static void dccp_write_timer(struct timer_list *t)
{
    struct inet_connection_sock *icsk =
            from_timer(icsk, t, icsk_retransmit_timer);
    struct sock *sk = &icsk->icsk_inet.sk;
    int event = 0;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) {
        /* Try again later */
        sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                   jiffies + (HZ / 20));
        goto out;
    }

    /* Nothing to do on a closed socket or with no event pending. */
    if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
        goto out;

    /* Fired early (deadline was moved): rearm for the real timeout. */
    if (time_after(icsk->icsk_timeout, jiffies)) {
        sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                   icsk->icsk_timeout);
        goto out;
    }

    /* Consume the pending event before handling it. */
    event = icsk->icsk_pending;
    icsk->icsk_pending = 0;

    switch (event) {
    case ICSK_TIME_RETRANS:
        dccp_retransmit_timer(sk);
        break;
    }
out:
    bh_unlock_sock(sk);
    /* NOTE(review): presumably drops the socket reference held while the
     * timer was armed — confirm against sk_reset_timer()/sock_hold(). */
    sock_put(sk);
}
0160 
0161 static void dccp_keepalive_timer(struct timer_list *t)
0162 {
0163     struct sock *sk = from_timer(sk, t, sk_timer);
0164 
0165     pr_err("dccp should not use a keepalive timer !\n");
0166     sock_put(sk);
0167 }
0168 
/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(struct timer_list *t)
{
    struct inet_connection_sock *icsk =
            from_timer(icsk, t, icsk_delack_timer);
    struct sock *sk = &icsk->icsk_inet.sk;

    bh_lock_sock(sk);
    if (sock_owned_by_user(sk)) {
        /* Try again later. */
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
        sk_reset_timer(sk, &icsk->icsk_delack_timer,
                   jiffies + TCP_DELACK_MIN);
        goto out;
    }

    /* Nothing to do on a closed socket or without a scheduled delack. */
    if (sk->sk_state == DCCP_CLOSED ||
        !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
        goto out;
    /* Fired early (deadline was moved): rearm for the real timeout. */
    if (time_after(icsk->icsk_ack.timeout, jiffies)) {
        sk_reset_timer(sk, &icsk->icsk_delack_timer,
                   icsk->icsk_ack.timeout);
        goto out;
    }

    /* Consume the timer event. */
    icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

    if (inet_csk_ack_scheduled(sk)) {
        if (!inet_csk_in_pingpong_mode(sk)) {
            /* Delayed ACK missed: inflate ATO. */
            icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
                         icsk->icsk_rto);
        } else {
            /* Delayed ACK missed: leave pingpong mode and
             * deflate ATO.
             */
            inet_csk_exit_pingpong_mode(sk);
            icsk->icsk_ack.ato = TCP_ATO_MIN;
        }
        dccp_send_ack(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
    }
out:
    bh_unlock_sock(sk);
    /* NOTE(review): presumably balances the reference taken when the
     * delack timer was armed — confirm against sk_reset_timer(). */
    sock_put(sk);
}
0215 
0216 /**
0217  * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
0218  * @t: pointer to the tasklet associated with this handler
0219  *
0220  * See the comments above %ccid_dequeueing_decision for supported modes.
0221  */
0222 static void dccp_write_xmitlet(struct tasklet_struct *t)
0223 {
0224     struct dccp_sock *dp = from_tasklet(dp, t, dccps_xmitlet);
0225     struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
0226 
0227     bh_lock_sock(sk);
0228     if (sock_owned_by_user(sk))
0229         sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
0230     else
0231         dccp_write_xmit(sk);
0232     bh_unlock_sock(sk);
0233     sock_put(sk);
0234 }
0235 
0236 static void dccp_write_xmit_timer(struct timer_list *t)
0237 {
0238     struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);
0239 
0240     dccp_write_xmitlet(&dp->dccps_xmitlet);
0241 }
0242 
0243 void dccp_init_xmit_timers(struct sock *sk)
0244 {
0245     struct dccp_sock *dp = dccp_sk(sk);
0246 
0247     tasklet_setup(&dp->dccps_xmitlet, dccp_write_xmitlet);
0248     timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
0249     inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
0250                   &dccp_keepalive_timer);
0251 }
0252 
/* Reference instant for dccp_timestamp(); set once in dccp_timestamping_init(). */
static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about circa every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
    u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

    /* do_div() divides @delta in place (64/32 division safe on 32-bit);
     * the return statement then truncates the quotient to u32. */
    do_div(delta, 10);
    return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
0268 
/* Record the epoch against which dccp_timestamp() measures; called once
 * at boot/module init (__init section). */
void __init dccp_timestamping_init(void)
{
    dccp_timestamp_seed = ktime_get_real();
}