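/*
 * TCP Vegas congestion control
 *
 * Based on the congestion detection/avoidance scheme described in
 *    Lawrence S. Brakmo and Larry L. Peterson,
 *    "TCP Vegas: End to End Congestion Avoidance on a Global Internet",
 *    IEEE Journal on Selected Areas in Communications, 13(8), October 1995.
 */
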
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

static int alpha = 2;
static int beta = 4;
static int gamma = 1;

module_param(alpha, int, 0644);
MODULE_PARM_DESC(alpha, "lower bound of packets in network");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
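
/* There are several situations in which we must "re-start" Vegas:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 *
 * In these situations we cannot do a Vegas calculation at the end of
 * the first RTT, because the saved cwnd and congestion feedback are
 * stale.  Instead we wait for the completion of an RTT during which
 * we actually receive ACKs.
 */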
static void vegas_enable(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *vegas = inet_csk_ca(sk);

	/* Begin taking Vegas samples next time we send something. */
	vegas->doing_vegas_now = 1;

	/* Set the beginning of the next send window. */
	vegas->beg_snd_nxt = tp->snd_nxt;

	vegas->cntRTT = 0;
	vegas->minRTT = 0x7fffffff;
}

/* Stop taking Vegas samples for now. */
static inline void vegas_disable(struct sock *sk)
{
	struct vegas *vegas = inet_csk_ca(sk);

	vegas->doing_vegas_now = 0;
}

void tcp_vegas_init(struct sock *sk)
{
	struct vegas *vegas = inet_csk_ca(sk);

	vegas->baseRTT = 0x7fffffff;
	vegas_enable(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_init);
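
/* Do the RTT sampling that Vegas needs.  We min-filter the samples
 * seen within one RTT to estimate the current propagation plus
 * queuing delay (min-filtering reduces the noise from delayed ACKs),
 * and we min-filter over the lifetime of the connection to estimate
 * the propagation delay alone (baseRTT).
 */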
void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct vegas *vegas = inet_csk_ca(sk);
	u32 vrtt;

	if (sample->rtt_us < 0)
		return;

	/* Never allow a zero rtt or baseRTT. */
	vrtt = sample->rtt_us + 1;

	/* Filter to find the propagation delay: */
	if (vrtt < vegas->baseRTT)
		vegas->baseRTT = vrtt;

	/* Find the min RTT seen during the last RTT to get the current
	 * propagation delay + queuing delay:
	 */
	vegas->minRTT = min(vegas->minRTT, vrtt);
	vegas->cntRTT++;
}
EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked);

void tcp_vegas_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		vegas_enable(sk);
	else
		vegas_disable(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_state);
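
/* If the connection is idle and we are restarting, we don't want to
 * do any Vegas calculations until we get fresh RTT samples.  So on a
 * restart we reset the Vegas state to a clean slate; after we get
 * ACKs for this flight of packets we can make Vegas calculations
 * again.
 */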
void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART ||
	    event == CA_EVENT_TX_START)
		tcp_vegas_init(sk);
}
EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);

static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
{
	return min(tp->snd_ssthresh, tcp_snd_cwnd(tp));
}
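
/* Once per RTT, Vegas compares the measured minimum RTT against the
 * propagation delay estimate (baseRTT) and derives "diff", an
 * estimate of how many extra segments this connection is keeping
 * queued in the network.  cwnd is increased while diff < alpha,
 * decreased while diff > beta, and left alone in between; during
 * slow start, diff > gamma triggers the switch to congestion
 * avoidance.
 */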
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct vegas *vegas = inet_csk_ca(sk);

	if (!vegas->doing_vegas_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	if (after(ack, vegas->beg_snd_nxt)) {
		/* Do the Vegas once-per-RTT cwnd adjustment. */

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		vegas->beg_snd_nxt = tp->snd_nxt;

		/* We do the Vegas calculations only if we got enough RTT
		 * samples to be reasonably sure that at least one of them
		 * wasn't from a delayed ACK.  With only two samples we are
		 * almost certainly seeing delayed ACKs; with three or more
		 * we should be OK.
		 */
		if (vegas->cntRTT <= 2) {
			/* Not enough RTT samples for a Vegas calculation,
			 * so behave like Reno.
			 */
			tcp_reno_cong_avoid(sk, ack, acked);
		} else {
			u32 rtt, diff;
			u64 target_cwnd;

			/* Use the min RTT seen during the last RTT for the
			 * Vegas calculations.  Taking the min filters out
			 * the effects of delayed ACKs, at the cost of
			 * noticing congestion a bit later.
			 */
			rtt = vegas->minRTT;

			/* Calculate the cwnd we should have if we weren't
			 * going too fast:
			 *   (actual rate in segments) * baseRTT
			 */
			target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT;
			do_div(target_cwnd, rtt);

			/* Calculate the difference between the window we
			 * have and the window we would like to have; this
			 * is the "Diff" from the Vegas papers.
			 */
			diff = tcp_snd_cwnd(tp) * (rtt - vegas->baseRTT) / vegas->baseRTT;

			if (diff > gamma && tcp_in_slow_start(tp)) {
				/* Going too fast.  Slow down and switch to
				 * congestion avoidance: set cwnd to match
				 * the actual rate,
				 *   cwnd = (actual rate) * baseRTT,
				 * and add 1 because the integer truncation
				 * robs us of full link utilization.
				 */
				tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
							 (u32)target_cwnd + 1));
				tp->snd_ssthresh = tcp_vegas_ssthresh(tp);

			} else if (tcp_in_slow_start(tp)) {
				/* Slow start. */
				tcp_slow_start(tp, acked);
			} else {
				/* Congestion avoidance: figure out where we
				 * would like cwnd to be.
				 */
				if (diff > beta) {
					/* The old window was too fast, so
					 * we slow down.
					 */
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
					tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
				} else if (diff < alpha) {
					/* We don't have enough extra packets
					 * in the network, so speed up.
					 */
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
				} else {
					/* Sending just as fast as we
					 * should be.
					 */
				}
			}

			if (tcp_snd_cwnd(tp) < 2)
				tcp_snd_cwnd_set(tp, 2);
			else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
				tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);

			tp->snd_ssthresh = tcp_current_ssthresh(sk);
		}

		/* Wipe the slate clean for the next RTT. */
		vegas->cntRTT = 0;
		vegas->minRTT = 0x7fffffff;
	}
	/* Use normal slow start otherwise. */
	else if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
}
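
/* Extract Vegas info for the TCP socket info provided via netlink
 * (inet_diag).
 */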
size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
			  union tcp_cc_info *info)
{
	const struct vegas *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = ca->doing_vegas_now;
		info->vegas.tcpv_rttcnt = ca->cntRTT;
		info->vegas.tcpv_rtt = ca->baseRTT;
		info->vegas.tcpv_minrtt = ca->minRTT;

		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_vegas_get_info);

static struct tcp_congestion_ops tcp_vegas __read_mostly = {
	.init		= tcp_vegas_init,
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_vegas_cong_avoid,
	.pkts_acked	= tcp_vegas_pkts_acked,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,

	.owner		= THIS_MODULE,
	.name		= "vegas",
};

static int __init tcp_vegas_register(void)
{
	BUILD_BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_vegas);
	return 0;
}

static void __exit tcp_vegas_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_vegas);
}

module_init(tcp_vegas_register);
module_exit(tcp_vegas_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Vegas");