#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive during
		 * the recovery or when starting recovery by the DUPACK
		 * threshold: use no extra reordering window at all.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be reordering resilient, allow a settling delay of min_rtt/4
	 * per reo_wnd step. Use min_rtt rather than the smoothed RTT because
	 * reordering is usually a path property and less related to queuing
	 * or delayed ACKs. Never exceed the smoothed RTT (srtt_us stores
	 * 8 * srtt, hence the >> 3).
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

/* Remaining time (in usec) before the skb should be declared lost: the RACK
 * RTT plus the reordering window, minus the time elapsed since the skb was
 * (re)sent. A non-positive result means the skb can be marked lost.
 */
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
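
/* A minimal illustrative walkthrough of the arithmetic above, with made-up
 * numbers (not taken from this file): suppose tcp_min_rtt() is 40000 us and
 * rack.reo_wnd_steps is 1, so tcp_rack_reo_wnd() returns
 * min(40000 / 4 * 1, srtt) = 10000 us, assuming the smoothed RTT is larger.
 * If rack.rtt_us is 50000 us and the skb was (re)sent 55000 us ago,
 * tcp_rack_skb_timeout() returns 50000 + 10000 - 55000 = 5000 us: the skb is
 * not yet considered lost, and the caller arms the reordering timer for
 * roughly that long. Had the skb been sent 65000 us ago, the result would be
 * -5000 us and it would be marked lost immediately.
 */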

/* RACK (Recent ACKnowledgment) loss detection:
 *
 * A packet is marked lost if some packet sent later has already been
 * (s)acked and the packet still has not been (s)acked within a small
 * reordering window after that. The idea is similar to the classic
 * dupthresh and FACK heuristics, but RACK works in the time domain: it
 * compares send timestamps instead of counting duplicate ACKs or comparing
 * sequence numbers. That makes it apply equally to original and
 * retransmitted packets (robust against tail losses) and lets it tolerate
 * reordering with a "settling delay" (the reordering window) instead of a
 * tuned dupthresh.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		/* The list is in send order, so stop at the first skb that
		 * was not sent before the most recently (s)acked one.
		 */
		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}


/* Record the most recently (re)sent time among the (s)acked packets, along
 * with the RTT measured from it. This is the state that
 * tcp_rack_detect_loss() compares every outstanding packet against.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or an earlier
		 * retransmission) was (s)acked. An RTT sample below the
		 * minimum RTT strongly suggests the ACK is for an earlier
		 * transmission, so using it to advance the RACK state could
		 * cause packets that are still in flight to be falsely
		 * marked lost. Discard such ambiguous samples.
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
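
/* A minimal illustrative example of the ambiguity filter above, with made-up
 * numbers (not from this file): tcp_min_rtt() is 40 ms and a retransmission
 * sent 5 ms ago is (s)acked. The 5 ms sample is below the minimum RTT, so the
 * ACK almost certainly covers the original transmission (or an earlier
 * retransmission) rather than this one; the sample is dropped and the RACK
 * state is not advanced. The same 5 ms sample for a never-retransmitted
 * packet would be accepted, since there is no ambiguity about which
 * transmission it measures.
 */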

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}


/* Updates the RACK's reo_wnd based on DSACK and number of recoveries.
 *
 * If a DSACK is received that looks like it was caused by reordering that
 * spuriously triggered fast recovery, grow reo_wnd by one step (min_rtt/4,
 * still bounded by srtt in tcp_rack_reo_wnd()), since the reordering delay
 * was apparently longer than the current window.
 *
 * The grown window is persisted for TCP_RACK_RECOVERY_THRESH successful
 * recoveries; after that it falls back to the default single step.
 *
 * reo_wnd grows at most once per round trip, so that the DSACK being
 * reacted to is (approximately) due to a spurious retransmission sent
 * after the last adjustment. It is tracked in steps of min_rtt/4 rather
 * than as an absolute value, so it adapts as the RTT changes.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
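
/* A minimal illustrative example of the adaptation above (made-up sequence,
 * not from this file): a DSACK arriving at least one round trip after the
 * previous adjustment bumps reo_wnd_steps from 1 to 2, so tcp_rack_reo_wnd()
 * now allows up to 2 * min_rtt/4 of settling delay (still capped at srtt),
 * and reo_wnd_persist is re-armed to TCP_RACK_RECOVERY_THRESH. Once
 * reo_wnd_persist (decremented elsewhere as recoveries complete) reaches
 * zero and no further DSACK is pending, reo_wnd_steps falls back to the
 * default single step.
 */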

/* RFC 6582 NewReno recovery for connections without SACK: retransmit the
 * next unacked packet upon receiving
 * a) three or more duplicate ACKs, to start fast recovery, or
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		/* Mark only one MSS worth of data lost: fragment the head of
		 * the retransmit queue first if it carries more than one MSS.
		 */
		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}