Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  * NET      Generic infrastructure for Network protocols.
0004  *
0005  *      Definitions for request_sock
0006  *
0007  * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
0008  *
0009  *      From code originally in include/net/tcp.h
0010  */
0011 #ifndef _REQUEST_SOCK_H
0012 #define _REQUEST_SOCK_H
0013 
0014 #include <linux/slab.h>
0015 #include <linux/spinlock.h>
0016 #include <linux/types.h>
0017 #include <linux/bug.h>
0018 #include <linux/refcount.h>
0019 
0020 #include <net/sock.h>
0021 
0022 struct request_sock;
0023 struct sk_buff;
0024 struct dst_entry;
0025 struct proto;
0026 
/* Per-protocol operations and allocation parameters for request_socks.
 * Each protocol (e.g. TCP over IPv4/IPv6) provides one of these tables.
 */
struct request_sock_ops {
	int		family;			/* address family of this protocol */
	unsigned int	obj_size;		/* object size for @slab */
	struct kmem_cache	*slab;		/* cache request_socks come from */
	char		*slab_name;		/* name used for @slab */
	/* (re)transmit the SYN-ACK for @req on behalf of listener @sk */
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	/* send an ACK in response to @skb for the connection @req */
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	/* send a reset in response to @skb */
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	/* protocol-specific teardown, called from __reqsk_free() */
	void		(*destructor)(struct request_sock *req);
	/* invoked when the SYN-ACK retransmit timer fires for @req */
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
0041 
0042 int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
0043 
/* Copy of the headers of the original SYN, kept for later retrieval
 * (e.g. by a listener that asked to save incoming SYNs).  The three
 * lengths describe how much of @data belongs to each header layer.
 */
struct saved_syn {
	u32 mac_hdrlen;		/* length of the link-layer header in @data */
	u32 network_hdrlen;	/* length of the network header in @data */
	u32 tcp_hdrlen;		/* length of the TCP header in @data */
	u8 data[];		/* headers, back to back, mac|network|tcp */
};
0050 
0051 /* struct request_sock - mini sock to represent a connection request
0052  */
/* struct request_sock - mini sock to represent a connection request
 *
 * Starts with struct sock_common so it can be cast to/from struct sock
 * (see req_to_sk()/inet_reqsk()).  The rsk_* macros below alias fields
 * stored inside that common part.
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;	/* next request in its queue */
	u16				mss;		/* MSS advertised by the peer */
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;	/* peer's timestamp value */
	struct timer_list		rsk_timer;	/* SYN-ACK retransmit timer */
	const struct request_sock_ops	*rsk_ops;	/* per-protocol ops table */
	struct sock			*sk;		/* child socket, once created */
	struct saved_syn		*saved_syn;	/* saved SYN headers, if requested */
	u32				secid;		/* LSM security IDs -- presumably
							 * set by an LSM hook; confirm */
	u32				peer_secid;
	u32				timeout;
};
0075 
/* Downcast a (mini) socket back to its request_sock representation.
 * Valid because request_sock and sock share struct sock_common as their
 * first member; the const qualifier is deliberately dropped.
 */
static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	struct request_sock *req = (struct request_sock *)sk;

	return req;
}
0080 
/* View a request_sock through its embedded socket prefix, so generic
 * sock helpers (sk_node_init() etc.) can operate on it.
 */
static inline struct sock *req_to_sk(struct request_sock *req)
{
	struct sock *sk = (struct sock *)req;

	return sk;
}
0085 
/* reqsk_alloc - allocate and minimally initialize a request_sock
 * @ops: protocol ops table; supplies the slab cache to allocate from
 * @sk_listener: listening socket this request belongs to
 * @attach_listener: if true, pin @sk_listener and record it in rsk_listener
 *
 * Returns the new request_sock, or NULL on allocation failure or if the
 * listener's refcount already dropped to zero.
 */
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	/* GFP_ATOMIC: no sleeping allowed here; __GFP_NOWARN because
	 * failure is tolerated (caller just drops the request).
	 */
	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		/* The listener may be concurrently going away: only take a
		 * reference if its refcount is still non-zero, otherwise
		 * back out completely.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	/* The request doubles as a mini socket; borrow the listener's proto. */
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	/* Refcount starts at 0; reqsk_free() warns if it is non-zero, so the
	 * caller is expected to set it when publishing the request.
	 */
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
0116 
0117 static inline void __reqsk_free(struct request_sock *req)
0118 {
0119     req->rsk_ops->destructor(req);
0120     if (req->rsk_listener)
0121         sock_put(req->rsk_listener);
0122     kfree(req->saved_syn);
0123     kmem_cache_free(req->rsk_ops->slab, req);
0124 }
0125 
/* Free a request_sock that should have no outstanding references.
 * Warns (once) if the refcount is non-zero, which would indicate a
 * use-after-free hazard, then frees it anyway via __reqsk_free().
 */
static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}
0131 
0132 static inline void reqsk_put(struct request_sock *req)
0133 {
0134     if (refcount_dec_and_test(&req->rsk_refcnt))
0135         reqsk_free(req);
0136 }
0137 
/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock *rskq_rst_head; /* Keep track of past TFO */
	struct request_sock *rskq_rst_tail; /* requests that caused RST.
					     * This is part of the defense
					     * against spoofing attack.
					     */
	spinlock_t	lock;		/* see "lock" in the comment above */
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
0167 
/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_lock - protects the accept FIFO (head/tail); see reqsk_queue_remove()
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 * @synflood_warned - flag so a SYN-flood warning is only emitted once --
 *	presumably; the setter is not in this file, confirm at use site
 * @qlen - number of requests currently in the queue
 * @young - subset of @qlen that has never had a timeout
 *	(see reqsk_queue_added()/reqsk_queue_removed())
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock *rskq_accept_head;
	struct request_sock *rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};
0189 
0190 void reqsk_queue_alloc(struct request_sock_queue *queue);
0191 
0192 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
0193                bool reset);
0194 
0195 static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
0196 {
0197     return READ_ONCE(queue->rskq_accept_head) == NULL;
0198 }
0199 
0200 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
0201                               struct sock *parent)
0202 {
0203     struct request_sock *req;
0204 
0205     spin_lock_bh(&queue->rskq_lock);
0206     req = queue->rskq_accept_head;
0207     if (req) {
0208         sk_acceptq_removed(parent);
0209         WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
0210         if (queue->rskq_accept_head == NULL)
0211             queue->rskq_accept_tail = NULL;
0212     }
0213     spin_unlock_bh(&queue->rskq_lock);
0214     return req;
0215 }
0216 
0217 static inline void reqsk_queue_removed(struct request_sock_queue *queue,
0218                        const struct request_sock *req)
0219 {
0220     if (req->num_timeout == 0)
0221         atomic_dec(&queue->young);
0222     atomic_dec(&queue->qlen);
0223 }
0224 
/* Account for a new request entering @queue.  A fresh request is always
 * "young" (num_timeout == 0); reqsk_queue_removed() drops the young count
 * only for requests that stayed that way.
 */
static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}
0230 
/* Current number of requests accounted in @queue (atomic read, may be
 * stale by the time the caller uses it).
 */
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}
0235 
/* Current number of "young" requests (those without any timeout yet) in
 * @queue; same staleness caveat as reqsk_queue_len().
 */
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
0240 
0241 #endif /* _REQUEST_SOCK_H */