/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 *		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 */
struct inet_connection_sock_af_ops {
	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
	int	    (*rebuild_header)(struct sock *sk);
	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	int	    (*conn_request)(const struct sock *sk, struct sk_buff *skb);
	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req,
				      struct dst_entry *dst,
				      struct request_sock *req_unhash,
				      bool *own_req);
	u16	    net_header_len;
	u16	    net_frag_header_len;
	u16	    sockaddr_len;
	int	    (*setsockopt)(struct sock *sk, int level, int optname,
				  sockptr_t optval, unsigned int optlen);
	int	    (*getsockopt)(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen);
	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
	void	    (*mtu_reduced)(struct sock *sk);
};
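
/*
 * Illustrative sketch (not part of this header): each address family
 * fills in one of these tables and the connection code dispatches
 * through icsk_af_ops. The IPv4 table in net/ipv4/tcp_ipv4.c looks
 * roughly like this (abridged):
 *
 *	const struct inet_connection_sock_af_ops ipv4_specific = {
 *		.queue_xmit	= ip_queue_xmit,
 *		.send_check	= tcp_v4_send_check,
 *		.rebuild_header	= inet_sk_rebuild_header,
 *		.conn_request	= tcp_v4_conn_request,
 *		.syn_recv_sock	= tcp_v4_syn_recv_sock,
 *		.net_header_len	= sizeof(struct iphdr),
 *		...
 *	};
 */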

/** inet_connection_sock - INET connection oriented sock
 *
 * @icsk_accept_queue:	   FIFO of established children
 * @icsk_bind_hash:	   Bind node
 * @icsk_timeout:	   Timeout
 * @icsk_retransmit_timer: Resend (no ack)
 * @icsk_rto:		   Retransmit timeout
 * @icsk_pmtu_cookie:	   Last pmtu seen by socket
 * @icsk_ca_ops:	   Pluggable congestion control hook
 * @icsk_af_ops:	   Operations which are AF_INET{4,6} specific
 * @icsk_ulp_ops:	   Pluggable ULP control hook
 * @icsk_ulp_data:	   ULP private data
 * @icsk_clean_acked:	   Clean acked data hook
 * @icsk_ca_state:	   Congestion control state
 * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
 * @icsk_pending:	   Scheduled timer event
 * @icsk_backoff:	   Backoff
 * @icsk_syn_retries:	   Number of allowed SYN (or equivalent) retries
 * @icsk_probes_out:	   unanswered 0 window probes
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup:		   MTU probing control data
 * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
 * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u32			  icsk_rto_min;
	__u32			  icsk_delack_max;
	__u32			  icsk_pmtu_cookie;
	const struct tcp_congestion_ops *icsk_ca_ops;
	const struct inet_connection_sock_af_ops *icsk_af_ops;
	const struct tcp_ulp_ops  *icsk_ulp_ops;
	void __rcu		  *icsk_ulp_data;
	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
	__u8			  icsk_ca_state:5,
				  icsk_ca_initialized:1,
				  icsk_ca_setsockopt:1,
				  icsk_ca_dst_locked:1;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	__u8			  icsk_probes_out;
	__u16			  icsk_ext_hdr_len;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  retry;	 /* Number of attempts			   */
		__u32		  ato;		 /* Predicted tick of soft clock	   */
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
	struct {
		/* Range of MTUs to search */
		int		  search_high;
		int		  search_low;

		/* Information on the current probe. */
		u32		  probe_size:31,
		/* Is the MTUP feature enabled for this connection? */
				  enabled:1;

		u32		  probe_timestamp;
	} icsk_mtup;
	u32			  icsk_probes_tstamp;
	u32			  icsk_user_timeout;

	u64			  icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE	  sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};

#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
#define ICSK_TIME_DACK		2	/* Delayed ack timer */
#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
#define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */
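
/*
 * The cast in inet_csk() below is safe because struct
 * inet_connection_sock begins with struct inet_sock, which in turn
 * begins with struct sock, so an icsk pointer and its sock pointer
 * are interchangeable.
 */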
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
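
/*
 * Illustrative sketch: a congestion control module keeps its per-socket
 * state in icsk_ca_priv and fetches it with inet_csk_ca(). The struct
 * name below is hypothetical; real modules (e.g. CUBIC) follow the same
 * pattern and size-check against ICSK_CA_PRIV_SIZE at registration:
 *
 *	struct my_ca_state *ca = inet_csk_ca(sk);
 *
 *	BUILD_BUG_ON(sizeof(struct my_ca_state) > ICSK_CA_PRIV_SIZE);
 */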

struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority);

enum inet_csk_ack_state_t {
	ICSK_ACK_SCHED	= 1,
	ICSK_ACK_TIMER	= 2,
	ICSK_ACK_PUSHED = 4,
	ICSK_ACK_PUSHED2 = 8,
	ICSK_ACK_NOW	= 16	/* Send the next ACK immediately (once) */
};

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *),
			       void (*delack_handler)(struct timer_list *),
			       void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);

static inline void inet_csk_schedule_ack(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline int inet_csk_ack_scheduled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline void inet_csk_delack_init(struct sock *sk)
{
	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);

static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
		icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending = 0;
		icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}
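
/*
 * Illustrative usage, a sketch of what TCP does once all outstanding
 * data has been acked and the retransmit timer is no longer needed:
 *
 *	inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 */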

/*
 *	Reset the retransmission timer
 */
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
					     unsigned long when,
					     const unsigned long max_when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (when > max_when) {
		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, (void *)_THIS_IP_);
		when = max_when;
	}

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
		icsk->icsk_pending = what;
		icsk->icsk_timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
		icsk->icsk_ack.timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}
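
/*
 * Illustrative usage, a sketch of how TCP arms the retransmit timer
 * (see net/ipv4/tcp_output.c for the real call sites):
 *
 *	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 *				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 */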

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
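
/*
 * Worked example: with icsk_rto holding 200 ms worth of jiffies and
 * icsk_backoff = 3, the backed-off timeout is 200 ms << 3 = 1.6 s,
 * clamped to max_when. The u64 intermediate keeps a large backoff
 * value from overflowing the shift on 32-bit unsigned long.
 */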

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req,
					 bool own_req);

static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout, max_timeout);
}
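
/*
 * Worked example: a request that started with req->timeout holding
 * 1 s worth of jiffies and has already expired twice (num_timeout = 2)
 * waits 1 s << 2 = 4 s before the next SYN-ACK retransmit, capped at
 * max_timeout.
 */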

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	this_cpu_inc(*sk->sk_prot->orphan_count);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);

/*
 * LISTEN is a special case for poll..
 */
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
			(EPOLLIN | EPOLLRDNORM) : 0;
}

int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);

/* update the fast reuse flag when adding a socket */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

#define TCP_PINGPONG_THRESH	1

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
}

static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
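
/*
 * Illustrative sketch: pingpong ("interactive") mode marks a
 * request/response style connection; TCP's delayed ACK logic (see
 * __tcp_ack_snd_check() in net/ipv4/tcp_input.c) consults
 * inet_csk_in_pingpong_mode(sk) and, when set, holds the ACK briefly
 * in the hope of piggybacking it on the next reply.
 */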

static inline bool inet_csk_has_ulp(struct sock *sk)
{
	return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
}

#endif /* _INET_CONNECTION_SOCK_H */