/*
 *  net/dccp/timer.c
 *
 *  Timer handling for the DCCP protocol implementation.
 */
#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/export.h>

#include "dccp.h"

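/* sysctl variables governing numbers of retransmission attempts */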
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1 __read_mostly = TCP_RETR1;
int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;

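/* Abort on timeout: report the error, send a Reset and tear the socket down. */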
static void dccp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk_error_report(sk);

        dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
        dccp_done(sk);
        __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
}

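/* A write timeout has occurred; decide whether to keep retrying or give up. */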
static int dccp_write_timeout(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;

        if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
                if (icsk->icsk_retransmits != 0)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ?
                              : sysctl_dccp_request_retries;
        } else {
                if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
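                        /*
                         * Retransmissions are piling up: give the routing
                         * layer a negative hint so that a better path can
                         * be chosen before retrying further.
                         */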
                        dst_negative_advice(sk);
                }

                retry_until = sysctl_dccp_retries2;
        }

        if (icsk->icsk_retransmits >= retry_until) {
                /* Retry limit reached: abort the connection. */
                dccp_write_err(sk);
                return 1;
        }
        return 0;
}

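/*
 * The DCCP retransmit timer.
 */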
static void dccp_retransmit_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

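        /*
         * The write timeout handler has already sent a Reset and closed
         * the socket; there is nothing left to retransmit.
         */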
        if (dccp_write_timeout(sk))
                return;

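        /*
         * Only count the first timeout of a packet; subsequent
         * retransmissions of the same packet are not counted again.
         */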
        if (icsk->icsk_retransmits == 0)
                __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);

        if (dccp_retransmit_skb(sk) != 0) {
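                /*
                 * Retransmission failed because of a local problem (e.g.
                 * resource pressure): retry soon without backing off.
                 */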
                if (--icsk->icsk_retransmits == 0)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto,
                                              TCP_RESOURCE_PROBE_INTERVAL),
                                          DCCP_RTO_MAX);
                return;
        }

        icsk->icsk_backoff++;

        icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
                                  DCCP_RTO_MAX);
        if (icsk->icsk_retransmits > sysctl_dccp_retries1)
                __sk_dst_reset(sk);
}

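/*
 * Common ICSK write-timer callback: once the timeout is due, dispatch the
 * pending event (currently only ICSK_TIME_RETRANS) with the socket locked.
 */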
static void dccp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;
        int event = 0;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* The socket is locked by the user: try again shortly. */
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               jiffies + (HZ / 20));
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               icsk->icsk_timeout);
                goto out;
        }

        event = icsk->icsk_pending;
        icsk->icsk_pending = 0;

        switch (event) {
        case ICSK_TIME_RETRANS:
                dccp_retransmit_timer(sk);
                break;
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

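/* DCCP does not use a keepalive timer; this stub only logs misuse. */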
static void dccp_keepalive_timer(struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);

        pr_err("dccp should not use a keepalive timer !\n");
        sock_put(sk);
}

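/* Delayed-ACK timer: send the ACK that was postponed and adjust the ATO. */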
static void dccp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* The socket is locked by the user: try again later. */
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               jiffies + TCP_DELACK_MIN);
                goto out;
        }

        if (sk->sk_state == DCCP_CLOSED ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               icsk->icsk_ack.timeout);
                goto out;
        }

        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!inet_csk_in_pingpong_mode(sk)) {
                        /* Delayed ACK missed: inflate the ACK timeout (ATO). */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
                                                 icsk->icsk_rto);
                } else {
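                        /*
                         * Delayed ACK missed while in pingpong mode: leave
                         * pingpong mode and fall back to the minimum ATO.
                         */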
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                dccp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

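/*
 * Transmit tasklet: flush the write queue via dccp_write_xmit(), or defer
 * by one jiffy if the socket is currently locked by the user.
 */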
static void dccp_write_xmitlet(struct tasklet_struct *t)
{
        struct dccp_sock *dp = from_tasklet(dp, t, dccps_xmitlet);
        struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk);
        bh_unlock_sock(sk);
        sock_put(sk);
}

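/* Timer callback used to re-run the xmitlet once the deferral above expires. */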
static void dccp_write_xmit_timer(struct timer_list *t)
{
        struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer);

        dccp_write_xmitlet(&dp->dccps_xmitlet);
}

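/* Wire up the per-socket transmit tasklet/timer and the common ICSK timers. */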
void dccp_init_xmit_timers(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        tasklet_setup(&dp->dccps_xmitlet, dccp_write_xmitlet);
        timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0);
        inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
                                  &dccp_keepalive_timer);
}

static ktime_t dccp_timestamp_seed;

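/*
 * dccp_timestamp  -  10s-of-microseconds time source
 *
 * Returns the time elapsed since DCCP timestamping was initialised, in units
 * of 10 microseconds, the elapsed-time resolution used by DCCP (RFC 4340,
 * sec. 13). The 32-bit value wraps roughly every 11.9 hours.
 */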
u32 dccp_timestamp(void)
{
        u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

        do_div(delta, 10);
        return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);

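/* Record the reference time against which dccp_timestamp() is measured. */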
void __init dccp_timestamping_init(void)
{
        dccp_timestamp_seed = ktime_get_real();
}