// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
    u8 key[TCP_FASTOPEN_KEY_LENGTH];
    struct tcp_fastopen_context *ctxt;

    rcu_read_lock();
    ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
    if (ctxt) {
        rcu_read_unlock();
        return;
    }
    rcu_read_unlock();

    /* tcp_fastopen_reset_cipher publishes the new context
     * atomically, so we allow this race to happen here.
     *
     * All call sites of tcp_fastopen_cookie_gen also check
     * for a valid cookie, so this is an acceptable risk.
     */
    get_random_bytes(key, sizeof(key));
    tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}
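
/* For illustration only (not part of this file): the net-wide key above is
 * generated lazily from random bytes the first time a listener needs it. An
 * administrator can install an explicit key instead through the
 * net.ipv4.tcp_fastopen_key sysctl, which also ends up in
 * tcp_fastopen_reset_cipher(). A minimal userspace sketch, assuming the
 * dash-separated hex format this sysctl accepts:
 */
#include <stdio.h>

int main(void)
{
    /* 16-byte primary key written as four 32-bit hex words */
    const char *key = "a1a1a1a1-b2b2b2b2-c3c3c3c3-d4d4d4d4";
    FILE *f = fopen("/proc/sys/net/ipv4/tcp_fastopen_key", "w");

    if (!f)
        return 1;
    fprintf(f, "%s\n", key);
    return fclose(f) ? 1 : 0;
}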

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
    struct tcp_fastopen_context *ctx =
        container_of(head, struct tcp_fastopen_context, rcu);

    kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
    struct tcp_fastopen_context *ctx;

    ctx = rcu_dereference_protected(
            inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
    if (ctx)
        call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
    struct tcp_fastopen_context *ctxt;

    ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);

    if (ctxt)
        call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                  void *primary_key, void *backup_key)
{
    struct tcp_fastopen_context *ctx, *octx;
    struct fastopen_queue *q;
    int err = 0;

    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
        err = -ENOMEM;
        goto out;
    }

    ctx->key[0].key[0] = get_unaligned_le64(primary_key);
    ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
    if (backup_key) {
        ctx->key[1].key[0] = get_unaligned_le64(backup_key);
        ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
        ctx->num = 2;
    } else {
        ctx->num = 1;
    }

    if (sk) {
        q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
    } else {
        octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
    }

    if (octx)
        call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
    return err;
}
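
/* For illustration only (not part of this file): a listener can install its
 * own per-socket Fast Open key with the TCP_FASTOPEN_KEY socket option, which
 * takes the sk != NULL path of tcp_fastopen_reset_cipher() above. A minimal
 * userspace sketch; the 32-byte primary+backup form is assumed to work only
 * on kernels with backup-key support.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33     /* from linux/tcp.h, for older libc headers */
#endif

static int install_tfo_key(int listen_fd)
{
    unsigned char key[16];      /* primary key only; 32 bytes would add a backup */

    memset(key, 0xa5, sizeof(key));     /* use real random bytes in practice */
    return setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY,
                      key, sizeof(key));
}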

int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
                u64 *key)
{
    struct tcp_fastopen_context *ctx;
    int n_keys = 0, i;

    rcu_read_lock();
    if (icsk)
        ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
    else
        ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
    if (ctx) {
        n_keys = tcp_fastopen_context_len(ctx);
        for (i = 0; i < n_keys; i++) {
            put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
            put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
        }
    }
    rcu_read_unlock();

    return n_keys;
}
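
/* For illustration only (not part of this file): the keys exposed by
 * tcp_fastopen_get_cipher() can be read back from userspace with
 * getsockopt(TCP_FASTOPEN_KEY); the kernel is expected to return 16 bytes per
 * configured key, primary first, then backup if present. A hedged sketch:
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33
#endif

static int dump_tfo_key_count(int listen_fd)
{
    unsigned char keys[32];     /* room for primary + backup */
    socklen_t len = sizeof(keys);

    if (getsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN_KEY, keys, &len))
        return -1;
    printf("%u Fast Open key(s) configured\n", (unsigned int)(len / 16));
    return 0;
}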

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                         struct sk_buff *syn,
                         const siphash_key_t *key,
                         struct tcp_fastopen_cookie *foc)
{
    BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

    if (req->rsk_ops->family == AF_INET) {
        const struct iphdr *iph = ip_hdr(syn);

        foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
                      sizeof(iph->saddr) +
                      sizeof(iph->daddr),
                      key));
        foc->len = TCP_FASTOPEN_COOKIE_SIZE;
        return true;
    }
#if IS_ENABLED(CONFIG_IPV6)
    if (req->rsk_ops->family == AF_INET6) {
        const struct ipv6hdr *ip6h = ipv6_hdr(syn);

        foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
                      sizeof(ip6h->saddr) +
                      sizeof(ip6h->daddr),
                      key));
        foc->len = TCP_FASTOPEN_COOKIE_SIZE;
        return true;
    }
#endif
    return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
                    struct request_sock *req,
                    struct sk_buff *syn,
                    struct tcp_fastopen_cookie *foc)
{
    struct tcp_fastopen_context *ctx;

    rcu_read_lock();
    ctx = tcp_fastopen_get_ctx(sk);
    if (ctx)
        __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
    rcu_read_unlock();
}
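
/* For reference (not part of this file): the 8-byte value computed above is
 * carried in the TCP Fast Open option, kind 34 (RFC 7413); foc->exp marks the
 * older experimental-option encoding (kind 254 plus a 16-bit magic). A hedged
 * sketch of the base on-wire layout, with a hypothetical struct name:
 */
#include <stdint.h>

struct tfo_cookie_opt {
    uint8_t kind;               /* 34 (TCPOPT_FASTOPEN) */
    uint8_t len;                /* 2 + cookie length, i.e. 10 for this 8-byte cookie */
    uint8_t cookie[8];          /* SipHash of the source/destination addresses */
} __attribute__((packed));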

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
        return;

    skb = skb_clone(skb, GFP_ATOMIC);
    if (!skb)
        return;

    skb_dst_drop(skb);
    /* segs_in has been initialized to 1 in tcp_create_openreq_child().
     * Hence, reset segs_in to 0 before calling tcp_segs_in()
     * to avoid double counting.  Also, tcp_segs_in() expects
     * skb->len to include the tcp_hdrlen.  Hence, it should
     * be called before __skb_pull().
     */
    tp->segs_in = 0;
    tcp_segs_in(tp, skb);
    __skb_pull(skb, tcp_hdrlen(skb));
    sk_forced_mem_schedule(sk, skb->truesize);
    skb_set_owner_r(skb, sk);

    TCP_SKB_CB(skb)->seq++;
    TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

    tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
    __skb_queue_tail(&sk->sk_receive_queue, skb);
    tp->syn_data_acked = 1;

    /* u64_stats_update_begin(&tp->syncp) not needed here,
     * as we certainly are not changing upper 32bit value (0)
     */
    tp->bytes_received = skb->len;

    if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
        tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
                     struct request_sock *req,
                     struct sk_buff *syn,
                     struct tcp_fastopen_cookie *orig,
                     struct tcp_fastopen_cookie *valid_foc)
{
    struct tcp_fastopen_cookie search_foc = { .len = -1 };
    struct tcp_fastopen_cookie *foc = valid_foc;
    struct tcp_fastopen_context *ctx;
    int i, ret = 0;

    rcu_read_lock();
    ctx = tcp_fastopen_get_ctx(sk);
    if (!ctx)
        goto out;
    for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
        __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
        if (tcp_fastopen_cookie_match(foc, orig)) {
            ret = i + 1;
            goto out;
        }
        foc = &search_foc;
    }
out:
    rcu_read_unlock();
    return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
                          struct sk_buff *skb,
                          struct request_sock *req)
{
    struct tcp_sock *tp;
    struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
    struct sock *child;
    bool own_req;

    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                             NULL, &own_req);
    if (!child)
        return NULL;

    spin_lock(&queue->fastopenq.lock);
    queue->fastopenq.qlen++;
    spin_unlock(&queue->fastopenq.lock);

    /* Initialize the child socket. Have to fix some values to take
     * into account the child is a Fast Open socket and is created
     * only out of the bits carried in the SYN packet.
     */
    tp = tcp_sk(child);

    rcu_assign_pointer(tp->fastopen_rsk, req);
    tcp_rsk(req)->tfo_listener = true;

    /* RFC1323: The window in SYN & SYN/ACK segments is never
     * scaled. So correct it appropriately.
     */
    tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
    tp->max_window = tp->snd_wnd;

    /* Activate the retrans timer so that SYNACK can be retransmitted.
     * The request socket is not added to the ehash
     * because it's been added to the accept queue directly.
     */
    inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

    refcount_set(&req->rsk_refcnt, 2);

    /* Now finish processing the fastopen child socket. */
    tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

    tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

    tcp_fastopen_add_skb(child, skb);

    tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
    tp->rcv_wup = tp->rcv_nxt;
    /* tcp_conn_request() will send the SYNACK and queue the child
     * into the listener's accept queue.
     */
    return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
    struct fastopen_queue *fastopenq;

    /* Make sure the listener has enabled fastopen, and we don't
     * exceed the max # of pending TFO requests allowed before trying
     * to validate the cookie, in order to avoid burning CPU cycles
     * unnecessarily.
     *
     * XXX (TFO) - The implication of checking the max_qlen before
     * processing a cookie request is that clients can't differentiate
     * between qlen overflow causing Fast Open to be disabled
     * temporarily vs a server not supporting Fast Open at all.
     */
    fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
    if (fastopenq->max_qlen == 0)
        return false;

    if (fastopenq->qlen >= fastopenq->max_qlen) {
        struct request_sock *req1;

        spin_lock(&fastopenq->lock);
        req1 = fastopenq->rskq_rst_head;
        if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
            __NET_INC_STATS(sock_net(sk),
                    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
            spin_unlock(&fastopenq->lock);
            return false;
        }
        fastopenq->rskq_rst_head = req1->dl_next;
        fastopenq->qlen--;
        spin_unlock(&fastopenq->lock);
        reqsk_put(req1);
    }
    return true;
}
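
/* For illustration only (not part of this file): tcp_fastopen_queue_check()
 * only lets Fast Open proceed when the listener set a non-zero TFO backlog,
 * which a server does with the TCP_FASTOPEN socket option before listen().
 * Data that arrived in the SYN (queued by tcp_fastopen_add_skb() above) is
 * then simply read from the accepted socket. A minimal userspace sketch; it
 * also assumes the TFO_SERVER_ENABLE bit is set in net.ipv4.tcp_fastopen.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int tfo_server(unsigned short port)
{
    struct sockaddr_in addr = {
        .sin_family = AF_INET,
        .sin_port = htons(port),
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };
    int qlen = 16;              /* max pending TFO requests (fastopenq.max_qlen) */
    char buf[1024];
    ssize_t n;
    int fd, cfd;

    fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        return -1;
    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
    listen(fd, 128);

    cfd = accept(fd, NULL, NULL);       /* may complete off a TFO SYN */
    if (cfd >= 0) {
        n = read(cfd, buf, sizeof(buf));        /* may return data carried in the SYN */
        if (n > 0)
            printf("got %zd bytes\n", n);
        close(cfd);
    }
    close(fd);
    return 0;
}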

static bool tcp_fastopen_no_cookie(const struct sock *sk,
                   const struct dst_entry *dst,
                   int flag)
{
    return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
           tcp_sk(sk)->fastopen_no_cookie ||
           (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
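
/* For illustration only (not part of this file): the per-socket
 * fastopen_no_cookie flag tested above is set with the TCP_FASTOPEN_NO_COOKIE
 * socket option; the other two sources are the TFO_CLIENT_NO_COOKIE /
 * TFO_SERVER_COOKIE_NOT_REQD bits of net.ipv4.tcp_fastopen and the
 * RTAX_FASTOPEN_NO_COOKIE route metric. A hedged userspace sketch:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_NO_COOKIE
#define TCP_FASTOPEN_NO_COOKIE 34       /* from linux/tcp.h, for older libc headers */
#endif

static int tfo_skip_cookie(int fd)
{
    int on = 1;

    /* client: send data in the SYN even without a cached cookie;
     * listener: accept data in the SYN without a cookie option
     */
    return setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_NO_COOKIE, &on, sizeof(on));
}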

/* Returns the child socket if we perform Fast Open on the SYN, NULL
 * otherwise. The cookie (foc) may be updated and returned to the client
 * in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                  struct request_sock *req,
                  struct tcp_fastopen_cookie *foc,
                  const struct dst_entry *dst)
{
    bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
    int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
    struct tcp_fastopen_cookie valid_foc = { .len = -1 };
    struct sock *child;
    int ret = 0;

    if (foc->len == 0) /* Client requests a cookie */
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

    if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
          (syn_data || foc->len >= 0) &&
          tcp_fastopen_queue_check(sk))) {
        foc->len = -1;
        return NULL;
    }

    if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
        goto fastopen;

    if (foc->len == 0) {
        /* Client requests a cookie. */
        tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
    } else if (foc->len > 0) {
        ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                            &valid_foc);
        if (!ret) {
            NET_INC_STATS(sock_net(sk),
                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else {
            /* Cookie is valid. Create a (full) child socket to
             * accept the data in SYN before returning a SYN-ACK to
             * ack the data. If we fail to create the socket, fall
             * back and ack only the ISN, but include the same
             * cookie.
             *
             * Note: a data-less SYN with a valid cookie is allowed to
             * send data in SYN_RECV state.
             */
fastopen:
            child = tcp_fastopen_create_child(sk, skb, req);
            if (child) {
                if (ret == 2) {
                    valid_foc.exp = foc->exp;
                    *foc = valid_foc;
                    NET_INC_STATS(sock_net(sk),
                              LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                } else {
                    foc->len = -1;
                }
                NET_INC_STATS(sock_net(sk),
                          LINUX_MIB_TCPFASTOPENPASSIVE);
                return child;
            }
            NET_INC_STATS(sock_net(sk),
                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        }
    }
    valid_foc.exp = foc->exp;
    *foc = valid_foc;
    return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                   struct tcp_fastopen_cookie *cookie)
{
    const struct dst_entry *dst;

    tcp_fastopen_cache_get(sk, mss, cookie);

    /* Firewall blackhole issue check */
    if (tcp_fastopen_active_should_disable(sk)) {
        cookie->len = -1;
        return false;
    }

    dst = __sk_dst_get(sk);

    if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
        cookie->len = -1;
        return true;
    }
    if (cookie->len > 0)
        return true;
    tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
    return false;
}
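
/* For illustration only (not part of this file): on the client side the
 * cookie cache consulted above is exercised by sendto() with MSG_FASTOPEN,
 * which puts the data in the SYN when a cookie is cached and requests one
 * otherwise. A minimal userspace sketch; assumes the client bit of
 * net.ipv4.tcp_fastopen is set.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

static ssize_t tfo_send(const char *ip, unsigned short port,
                        const void *buf, size_t len)
{
    struct sockaddr_in dst = {
        .sin_family = AF_INET,
        .sin_port = htons(port),
    };
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0 || inet_pton(AF_INET, ip, &dst.sin_addr) != 1)
        return -1;
    /* combines connect() and send(): the data may ride in the SYN */
    return sendto(fd, buf, len, MSG_FASTOPEN,
                  (struct sockaddr *)&dst, sizeof(dst));
}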

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until the application
 *               writes data; return false if we want to send the SYN
 *               immediately.
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
    struct tcp_fastopen_cookie cookie = { .len = 0 };
    struct tcp_sock *tp = tcp_sk(sk);
    u16 mss;

    if (tp->fastopen_connect && !tp->fastopen_req) {
        if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
            inet_sk(sk)->defer_connect = 1;
            return true;
        }

        /* Alloc fastopen_req in order for FO option to be included
         * in SYN
         */
        tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                       sk->sk_allocation);
        if (tp->fastopen_req)
            tp->fastopen_req->cookie = cookie;
        else
            *err = -ENOBUFS;
    }
    return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
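
/* For illustration only (not part of this file): tcp_fastopen_defer_connect()
 * backs the TCP_FASTOPEN_CONNECT socket option, which lets a client keep the
 * usual connect()/send() sequence: with a cached cookie, connect() returns
 * immediately and the SYN (with cookie and, when possible, data) goes out on
 * the first write. A minimal userspace sketch:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30 /* from linux/tcp.h, for older libc headers */
#endif

static int tfo_connect_send(const struct sockaddr_in *dst,
                            const void *buf, size_t len)
{
    int on = 1;
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return -1;
    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
    /* with a cached cookie this defers: no SYN is sent yet */
    if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0)
        goto err;
    /* the first write triggers the SYN, carrying this data when possible */
    if (send(fd, buf, len, 0) < 0)
        goto err;
    return fd;
err:
    close(fd);
    return -1;
}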

/*
 * The following code block deals with middlebox issues with TFO:
 * a middlebox firewall can cause the server's data to be blackholed
 * after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1 hour at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1 hour when we see a successful active
 * TFO connection with data exchanges.
 */
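
/* For illustration only (not part of this file): the back-off described above
 * doubles the disable period per consecutive blackhole event and is capped at
 * 2^6 times the base timeout (the tcp_fastopen_blackhole_timeout_sec sysctl;
 * the one-hour figure in the comment is assumed here). A self-contained
 * sketch of the resulting schedule:
 */
#include <stdio.h>

int main(void)
{
    unsigned int base_sec = 3600;       /* assumed base timeout: 1 hour */
    int times;

    for (times = 1; times <= 8; times++) {
        int shift = times - 1 < 6 ? times - 1 : 6;

        /* mirrors: multiplier = 1 << min(tfo_da_times - 1, 6) */
        printf("disable #%d -> %u s\n", times, (1u << shift) * base_sec);
    }
    return 0;
}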

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
    struct net *net = sock_net(sk);

    if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
        return;

    /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
    WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

    /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
     * We want net->ipv4.tfo_active_disable_stamp to be updated first.
     */
    smp_mb__before_atomic();
    atomic_inc(&net->ipv4.tfo_active_disable_times);

    NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
    unsigned int tfo_bh_timeout =
        READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
    unsigned long timeout;
    int tfo_da_times;
    int multiplier;

    if (!tfo_bh_timeout)
        return false;

    tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
    if (!tfo_da_times)
        return false;

    /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
    smp_rmb();

    /* Limit timeout to max: 2^6 * initial timeout */
    multiplier = 1 << min(tfo_da_times - 1, 6);

    /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
    timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
          multiplier * tfo_bh_timeout * HZ;
    if (time_before(jiffies, timeout))
        return true;

    /* Mark check bit so we can check for successful active TFO
     * condition and reset tfo_active_disable_times
     */
    tcp_sk(sk)->syn_fastopen_ch = 1;
    return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct dst_entry *dst;
    struct sk_buff *skb;

    if (!tp->syn_fastopen)
        return;

    if (!tp->data_segs_in) {
        skb = skb_rb_first(&tp->out_of_order_queue);
        if (skb && !skb_rb_next(skb)) {
            if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                tcp_fastopen_active_disable(sk);
                return;
            }
        }
    } else if (tp->syn_fastopen_ch &&
           atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
        dst = sk_dst_get(sk);
        if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
            atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
        dst_release(dst);
    }
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
    u32 timeouts = inet_csk(sk)->icsk_retransmits;
    struct tcp_sock *tp = tcp_sk(sk);

    /* Broken middle-boxes may black-hole a Fast Open connection during or
     * even after the handshake. Be extremely conservative and pause
     * Fast Open globally after hitting the third consecutive timeout or
     * exceeding the configured timeout limit.
     */
    if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
        (timeouts == 2 || (timeouts < 2 && expired))) {
        tcp_fastopen_active_disable(sk);
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
    }
}