/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H

#include <linux/skbuff.h>
#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

/* Total TCP header length, including options: doff counts 32-bit words. */
static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return __tcp_hdrlen(tcp_hdr(skb));
}
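
/*
 * Illustrative sketch, not part of the original header: assuming the stack
 * has already set the transport header offset and the skb is linear, the
 * TCP payload length follows directly from tcp_hdrlen(). The helper name
 * tcp_payload_len_example() is hypothetical.
 */
static inline unsigned int tcp_payload_len_example(const struct sk_buff *skb)
{
	/* Bytes past the transport header, minus the TCP header itself. */
	return skb->len - skb_transport_offset(skb) - tcp_hdrlen(skb);
}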

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return inner_tcp_hdr(skb)->doff * 4;
}

/**
 * skb_tcp_all_headers - Returns size of all headers for a TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * skb_inner_tcp_all_headers - Returns size of all headers for an encapsulated TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 */
static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

/* Length of the TCP options alone: doff counts the whole header in 32-bit
 * words, five of which are the fixed header.
 */
static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
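
/*
 * Illustrative sketch, not in the upstream header: scanning the option
 * bytes that tcp_optlen() accounts for, assuming a linear skb. Kind 0
 * (EOL) ends the list, kind 1 (NOP) is a single pad byte, and every other
 * option carries a length byte covering kind + length + data. The helper
 * name tcp_find_option_example() is hypothetical.
 */
static inline bool tcp_find_option_example(const struct sk_buff *skb, u8 kind)
{
	const u8 *ptr = (const u8 *)(tcp_hdr(skb) + 1);	/* first option byte */
	int left = tcp_optlen(skb);

	while (left > 0) {
		if (ptr[0] == 0)	/* EOL: end of option list */
			break;
		if (ptr[0] == 1) {	/* NOP: single-byte padding */
			ptr++;
			left--;
			continue;
		}
		if (left < 2 || ptr[1] < 2 || ptr[1] > left)
			break;		/* malformed option, bail out */
		if (ptr[0] == kind)
			return true;
		left -= ptr[1];
		ptr += ptr[1];
	}
	return false;
}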

/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN		4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX		16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE	8	/* the size employed by this impl. */

/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
	__le64	val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))];
	s8	len;
	bool	exp;	/* In RFC6994 experimental option format */
};

/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};
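
/*
 * Illustrative sketch, not in the upstream header: SACK blocks arrive in
 * network byte order (tcp_sack_block_wire) and are kept in host byte order
 * (tcp_sack_block) once parsed. The helper name is hypothetical, ntohl()
 * is assumed to be in scope via the byteorder headers already pulled in,
 * and a real parser must additionally cope with unaligned option bytes.
 */
static inline void tcp_sack_block_from_wire_example(struct tcp_sack_block *dst,
						    const struct tcp_sack_block_wire *src)
{
	dst->start_seq = ntohl(src->start_seq);	/* __be32 -> host-order u32 */
	dst->end_seq = ntohl(src->end_seq);
}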

/* These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN	(1 << 0)	/* 1 = peer is SACK capable */
#define TCP_DSACK_SEEN	(1 << 2)	/* 1 = DSACK was received from peer */

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	int	ts_recent_stamp;	/* Time we stored ts_recent (for aging) */
	u32	ts_recent;	/* Time stamp to echo next */
	u32	rcv_tsval;	/* Time stamp value */
	u32	rcv_tsecr;	/* Time stamp echo reply */
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet */
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet */
		dsack : 1,	/* D-SACK is scheduled */
		wscale_ok : 1,	/* Wscale seen on SYN packet */
		sack_ok : 3,	/* SACK seen on SYN packet */
		smc_ok : 1,	/* SMC seen on SYN packet */
		snd_wscale : 4,	/* Window scaling received from sender */
		rcv_wscale : 4;	/* Window scaling to send to receiver */
	u8	saw_unknown:1,	/* Received unknown option */
		unused:7;
	u8	num_sacks;	/* Number of SACK blocks */
	u16	user_mss;	/* mss requested by user in ioctl */
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
#if IS_ENABLED(CONFIG_SMC)
	rx_opt->smc_ok = 0;
#endif
}

/* This is the max number of SACKS that we'll generate and process. It's safe
 * to increase this, although since:
 *   size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four options will fit in a standard TCP header.
 */
#define TCP_NUM_SACKS 4

struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock	req;
	const struct tcp_request_sock_ops *af_specific;
	u64				snt_synack;	/* first SYNACK sent time */
	bool				tfo_listener;
	bool				is_mptcp;
#if IS_ENABLED(CONFIG_MPTCP)
	bool				drop_req;
#endif
	u32				txhash;
	u32				rcv_isn;
	u32				snt_isn;
	u32				ts_off;
	u32				last_oow_ack_time;	/* last SYNACK */
	u32				rcv_nxt;	/* the ack # by SYNACK. For
							 * FastOpen it's the seq#
							 * after data-in-SYN.
							 */
	u8				syn_tos;
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}

struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* Bytes of tcp header to send */
	u16	gso_segs;	/* Max number of segs per GSO packet */

/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__be32	pred_flags;

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u64	bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
				 * sum(delta(rcv_nxt)), or how many bytes
				 * were acked.
				 */
	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
				 * total number of segments in.
				 */
	u32	data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
				 * total number of data segments in.
				 */
	u32	rcv_nxt;	/* What we want to receive next */
	u32	copied_seq;	/* Head of yet unread data */
	u32	rcv_wup;	/* rcv_nxt on last window update sent */
	u32	snd_nxt;	/* Next sequence we send */
	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
				 * The total number of segments sent.
				 */
	u32	data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
				 * total number of data segments sent.
				 */
	u64	bytes_sent;	/* RFC4898 tcpEStatsPerfHCDataOctetsOut
				 * total number of data bytes sent.
				 */
	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
				 * sum(delta(snd_una)), or how many bytes
				 * were acked.
				 */
	u32	dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
				 * total number of DSACK blocks received
				 */
	u32	snd_una;	/* First byte we want an ack for */
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
	u32	last_oow_ack_time;	/* timestamp of last out-of-window ACK */
	u32	compressed_ack_rcv_nxt;

	u32	tsoffset;	/* timestamp offset */

	struct list_head tsq_node;	/* anchor in tsq_tasklet.head list */
	struct list_head tsorted_sent_queue;	/* time-sorted sent but un-SACKed skbs */

	u32	snd_wl1;	/* Sequence for window update */
	u32	snd_wnd;	/* The window we expect to receive */
	u32	max_window;	/* Maximal window ever seen from peer */
	u32	mss_cache;	/* Cached effective mss, not including SACKS */

	u32	window_clamp;	/* Maximal window to advertise */
	u32	rcv_ssthresh;	/* Current window clamp */

	/* Information of the most recently (s)acked skb */
	struct tcp_rack {
		u64 mstamp;	/* (Re)sent time of the skb */
		u32 rtt_us;	/* Associated RTT */
		u32 end_seq;	/* Ending TCP sequence of the skb */
		u32 last_delivered;	/* tp->delivered at last reo_wnd adjustment */
		u8 reo_wnd_steps;	/* Allowed reordering window */
#define TCP_RACK_RECOVERY_THRESH 16
		u8 reo_wnd_persist:5,	/* No. of recovery since last adjustment */
		   dsack_seen:1,	/* Whether DSACK seen after last adjustment */
		   advanced:1;		/* mstamp advanced since last lost marking */
	} rack;
	u16	advmss;		/* Advertised MSS */
	u8	compressed_ack;
	u8	dup_ack_counter:2,
		tlp_retrans:1,	/* TLP is a retransmission */
		unused:5;
	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
	u8	chrono_type:2,	/* current chronograph type */
		rate_app_limited:1,	/* rate_{delivered,interval_us} limited? */
		fastopen_connect:1,	/* FASTOPEN_CONNECT sockopt */
		fastopen_no_cookie:1,	/* Allow send/recv SYN+data without a cookie */
		is_sack_reneg:1,	/* in recovery from loss with SACK reneg? */
		fastopen_client_fail:2;	/* reason why fastopen failed */
	u8	nonagle     : 4,	/* Disable Nagle algorithm? */
		thin_lto    : 1,	/* Use linear timeouts for thin streams */
		recvmsg_inq : 1,	/* Indicate # of bytes in queue upon recvmsg */
		repair      : 1,
		frto        : 1;	/* F-RTO (RFC5682) activated in CA_Loss */
	u8	repair_queue;
	u8	save_syn:2,	/* Save headers of SYN packet */
		syn_data:1,	/* SYN includes data */
		syn_fastopen:1,	/* SYN includes Fast Open option */
		syn_fastopen_exp:1,	/* SYN includes Fast Open exp. option */
		syn_fastopen_ch:1,	/* Active TFO re-enabling probe */
		syn_data_acked:1,	/* data in SYN is acked by SYN-ACK */
		is_cwnd_limited:1;	/* forward progress limited by snd_cwnd? */
	u32	tlp_high_seq;	/* snd_nxt at the time of TLP */

	u32	tcp_tx_delay;	/* delay (in usec) added to TX packets */
	u64	tcp_wstamp_ns;	/* departure time for next sent packet */
	u64	tcp_clock_cache;	/* cache last tcp_clock_ns() (for sacked_out > 0) */

/* RTT measurement */
	u64	tcp_mstamp;	/* most recent packet received/sent */
	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32	mdev_us;	/* medium deviation */
	u32	mdev_max_us;	/* maximal mdev for the last rtt period */
	u32	rttvar_us;	/* smoothed mdev_max */
	u32	rtt_seq;	/* sequence number to update rttvar */
	struct minmax rtt_min;

	u32	packets_out;	/* Packets which are "in flight" */
	u32	retrans_out;	/* Retransmitted packets out */
	u32	max_packets_out;	/* max packets_out in last window */
	u32	max_packets_seq;	/* right edge of max_packets_out flight */

	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits. */
	u8	keepalive_probes;	/* num of allowed keep alive probes */
	u32	reordering;	/* Packet reordering metric. */
	u32	reord_seen;	/* number of data packet reordering events */
	u32	snd_up;		/* Urgent pointer */

/*
 *	Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_ssthresh;	/* Slow start size threshold */
	u32	snd_cwnd;	/* Sending congestion window */
	u32	snd_cwnd_cnt;	/* Linear increase counter */
	u32	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery.
				 */
	u32	prr_out;	/* Total number of pkts sent during Recovery. */
	u32	delivered;	/* Total data packets delivered incl. rexmits */
	u32	delivered_ce;	/* Like the above but only ECE marked packets */
	u32	lost;		/* Total data packets lost incl. rexmits */
	u32	app_limited;	/* limited until "delivered" reaches this val */
	u64	first_tx_mstamp;	/* start of window send phase */
	u64	delivered_mstamp;	/* time we reached "delivered" */
	u32	rate_delivered;	/* saved rate sample: packets delivered */
	u32	rate_interval_us;	/* saved rate sample: time elapsed */

	u32	rcv_wnd;	/* Current receiver window */
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lost_out;	/* Lost packets */
	u32	sacked_out;	/* SACK'd packets */

	struct hrtimer	pacing_timer;
	struct hrtimer	compressed_ack_timer;

	/* from STCP, retrans queue hinting */
	struct sk_buff *lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* OOO segments go in this rbtree. Socket lock must be held. */
	struct rb_root	out_of_order_queue;
	struct sk_buff	*ooo_last_skb;	/* cache rb_last(out_of_order_queue) */

	/* SACKs data, these 2 need to be together (see tcp_options_write) */
	struct tcp_sack_block duplicate_sack[1];	/* D-SACK block */
	struct tcp_sack_block selective_acks[4];	/* The SACKS themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;	/* skb just after the highest
					 * skb with SACKed bit set
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */

	int	lost_cnt_hint;

	u32	prior_ssthresh;	/* ssthresh saved at recovery start */
	u32	high_seq;	/* snd_nxt at onset of congestion */

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN.
				 */
	u32	undo_marker;	/* snd_una upon a new recovery episode. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u64	bytes_retrans;	/* RFC4898 tcpEStatsPerfOctetsRetrans
				 * Total data bytes retransmitted
				 */
	u32	total_retrans;	/* Total retransmits for entire connection */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	/* time before keep alive takes place */
	unsigned int		keepalive_intvl;	/* time interval between keep alive probes */

	int			linger2;

/* Sock_ops bpf program related variables */
#ifdef CONFIG_BPF
	u8	bpf_sock_ops_cb_flags;	/* Control calling BPF programs
					 * values defined in uapi/linux/tcp.h
					 */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif

	u16 timeout_rehash;	/* Timeout-triggered rehash attempts */

	u32 rcv_ooopack;	/* Received out-of-order packets, for tcpinfo */

/* Receiver side RTT estimation */
	u32 rcv_rtt_last_tsecr;
	struct {
		u32	rtt_us;
		u32	seq;
		u64	time;
	} rcv_rtt_est;

/* Receiver queue space */
	struct {
		u32	space;
		u32	seq;
		u64	time;
	} rcvq_space;

/* TCP-specific MTU probe information. */
	struct {
		u32	probe_seq_start;
		u32	probe_seq_end;
	} mtu_probe;
	u32	mtu_info;	/* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
				 * while socket was owned by user.
				 */

#if IS_ENABLED(CONFIG_MPTCP)
	bool	is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
	bool	(*smc_hs_congested)(const struct sock *sk);
	bool	syn_smc;	/* SYN includes SMC */
#endif

#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
	const struct tcp_sock_af_ops	*af_specific;

/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	__rcu *md5sig_info;
#endif

/* TCP fastopen related information */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to request_sock that resulted in this big
	 * socket. Used to retransmit SYNACKs etc.
	 */
	struct request_sock __rcu *fastopen_rsk;
	struct saved_syn *saved_syn;
};

enum tsq_enum {
	TSQ_THROTTLED,
	TSQ_QUEUED,
	TCP_TSQ_DEFERRED,	/* tcp_tasklet_func() found socket was owned */
	TCP_WRITE_TIMER_DEFERRED,	/* tcp_write_timer() found socket was owned */
	TCP_DELACK_TIMER_DEFERRED,	/* tcp_delack_timer() found socket was owned */
	TCP_MTU_REDUCED_DEFERRED,	/* tcp_v{4|6}_err() could not call
					 * tcp_v{4|6}_mtu_reduced()
					 */
};

enum tsq_flags {
	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
};

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
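
/*
 * Illustrative sketch, not in the upstream header: tcp_sk() is a plain
 * downcast, valid only on sockets that really are TCP (sk->sk_protocol ==
 * IPPROTO_TCP); nothing checks that here. The helper name is hypothetical.
 */
static inline u32 tcp_unacked_bytes_example(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Bytes sent but not yet cumulatively acked; u32 arithmetic wraps. */
	return tp->snd_nxt - tp->snd_una;
}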

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
	u32			  tw_rcv_wnd;
	u32			  tw_ts_offset;
	u32			  tw_ts_recent;

	/* The time we sent the last out-of-window ACK: */
	u32			  tw_last_oow_ack_time;

	int			  tw_ts_recent_stamp;
	u32			  tw_tx_delay;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	  *tw_md5_key;
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

static inline bool tcp_passive_fastopen(const struct sock *sk)
{
	return sk->sk_state == TCP_SYN_RECV &&
	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}

static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);

	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
}

static inline void tcp_move_syn(struct tcp_sock *tp,
				struct request_sock *req)
{
	tp->saved_syn = req->saved_syn;
	req->saved_syn = NULL;
}

static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
	kfree(tp->saved_syn);
	tp->saved_syn = NULL;
}

static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
{
	return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
	       saved_syn->tcp_hdrlen;
}
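
/*
 * Illustrative sketch, not in the upstream header: the saved SYN keeps the
 * captured MAC, network and TCP headers back to back, so the TCP header
 * starts mac_hdrlen + network_hdrlen bytes into the saved bytes. This
 * hypothetical helper assumes struct saved_syn exposes those bytes as a
 * trailing flexible array member named data[], as in current kernels.
 */
static inline const struct tcphdr *saved_syn_tcp_hdr_example(const struct saved_syn *ss)
{
	return (const struct tcphdr *)(ss->data + ss->mac_hdrlen +
				       ss->network_hdrlen);
}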

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
					       const struct sk_buff *orig_skb,
					       const struct sk_buff *ack_skb);

static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
	/* We use READ_ONCE() here because socket might not be locked.
	 * This happens for listeners.
	 */
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);

	return (user_mss && user_mss < mss) ? user_mss : mss;
}
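
/*
 * Illustrative worked example, not in the upstream header: with a user-set
 * TCP_MAXSEG of 1200 and a path-derived MSS of 1460, tcp_mss_clamp()
 * returns 1200; with user_mss unset (0), the 1460 passes through unchanged.
 * The helper below is hypothetical and merely restates that example.
 */
static inline u16 tcp_mss_clamp_demo(const struct tcp_sock *tp)
{
	return tcp_mss_clamp(tp, 1460);	/* 1200 if user_mss == 1200, else 1460 */
}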

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
		  int shiftlen);

void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
void tcp_sock_set_user_timeout(struct sock *sk, u32 val);

#endif	/* _LINUX_TCP_H */