0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Multipath TCP
0003  *
0004  * Copyright (c) 2017 - 2019, Intel Corporation.
0005  */
0006 
0007 #define pr_fmt(fmt) "MPTCP: " fmt
0008 
0009 #include <linux/kernel.h>
0010 #include <crypto/sha2.h>
0011 #include <net/tcp.h>
0012 #include <net/mptcp.h>
0013 #include "protocol.h"
0014 #include "mib.h"
0015 
0016 #include <trace/events/mptcp.h>
0017 
0018 static bool mptcp_cap_flag_sha256(u8 flags)
0019 {
0020     return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
0021 }
0022 
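     /* MPTCP uses a single TCP option kind (TCPOPT_MPTCP, IANA value 30);
      * the first payload byte carries a 4-bit subtype in its high nibble,
      * which is what the switch below dispatches on.
      */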
0023 static void mptcp_parse_option(const struct sk_buff *skb,
0024                    const unsigned char *ptr, int opsize,
0025                    struct mptcp_options_received *mp_opt)
0026 {
0027     u8 subtype = *ptr >> 4;
0028     int expected_opsize;
0029     u8 version;
0030     u8 flags;
0031     u8 i;
0032 
0033     switch (subtype) {
0034     case MPTCPOPT_MP_CAPABLE:
0035         /* strict size checking */
0036         if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
0037             if (skb->len > tcp_hdr(skb)->doff << 2)
0038                 expected_opsize = TCPOLEN_MPTCP_MPC_ACK_DATA;
0039             else
0040                 expected_opsize = TCPOLEN_MPTCP_MPC_ACK;
0041         } else {
0042             if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)
0043                 expected_opsize = TCPOLEN_MPTCP_MPC_SYNACK;
0044             else
0045                 expected_opsize = TCPOLEN_MPTCP_MPC_SYN;
0046         }
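             /* For reference, the sizes above (as defined in protocol.h):
              * 4 bytes on the SYN, 12 on the SYN/ACK (adds the sender key),
              * 20 on the pure ACK (both keys), 22 when the ACK also carries
              * a data mapping and 24 when a DSS checksum is appended too.
              */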
0047 
0048         /* Cfr. RFC 8684 Section 3.3:
0049          * "If a checksum is present but its use had not been
0050          * negotiated in the MP_CAPABLE handshake, the receiver MUST
0051          * close the subflow with a RST, as it is not behaving as negotiated.
0052          * If a checksum is not present when its use has been negotiated, the
0053          * receiver MUST close the subflow with a RST, as it is considered
0054          * broken."
0055          * We parse even options with mismatching csum presence, so that
0056          * later in subflow_data_ready we can trigger the reset.
0057          */
0058         if (opsize != expected_opsize &&
0059             (expected_opsize != TCPOLEN_MPTCP_MPC_ACK_DATA ||
0060              opsize != TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM))
0061             break;
0062 
0063         /* try to be gentle vs future versions on the initial syn */
0064         version = *ptr++ & MPTCP_VERSION_MASK;
0065         if (opsize != TCPOLEN_MPTCP_MPC_SYN) {
0066             if (version != MPTCP_SUPPORTED_VERSION)
0067                 break;
0068         } else if (version < MPTCP_SUPPORTED_VERSION) {
0069             break;
0070         }
0071 
0072         flags = *ptr++;
0073         if (!mptcp_cap_flag_sha256(flags) ||
0074             (flags & MPTCP_CAP_EXTENSIBILITY))
0075             break;
0076 
0077         /* RFC 6824, Section 3.1:
0078          * "For the Checksum Required bit (labeled "A"), if either
0079          * host requires the use of checksums, checksums MUST be used.
0080          * In other words, the only way for checksums not to be used
0081          * is if both hosts in their SYNs set A=0."
0082          */
0083         if (flags & MPTCP_CAP_CHECKSUM_REQD)
0084             mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
0085 
0086         mp_opt->deny_join_id0 = !!(flags & MPTCP_CAP_DENY_JOIN_ID0);
0087 
0088         mp_opt->suboptions |= OPTIONS_MPTCP_MPC;
0089         if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) {
0090             mp_opt->sndr_key = get_unaligned_be64(ptr);
0091             ptr += 8;
0092         }
0093         if (opsize >= TCPOLEN_MPTCP_MPC_ACK) {
0094             mp_opt->rcvr_key = get_unaligned_be64(ptr);
0095             ptr += 8;
0096         }
0097         if (opsize >= TCPOLEN_MPTCP_MPC_ACK_DATA) {
0098             /* Section 3.1.:
0099              * "the data parameters in a MP_CAPABLE are semantically
0100              * equivalent to those in a DSS option and can be used
0101              * interchangeably."
0102              */
0103             mp_opt->suboptions |= OPTION_MPTCP_DSS;
0104             mp_opt->use_map = 1;
0105             mp_opt->mpc_map = 1;
0106             mp_opt->data_len = get_unaligned_be16(ptr);
0107             ptr += 2;
0108         }
0109         if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
0110             mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
0111             mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
0112             ptr += 2;
0113         }
0114         pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
0115              version, flags, opsize, mp_opt->sndr_key,
0116              mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
0117         break;
0118 
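         /* MP_JOIN handshake recap (cfr. RFC 8684 Section 3.2): the SYN
          * carries the peer's 32-bit token plus a random nonce (12-byte
          * option), the SYN/ACK answers with a 64-bit truncated HMAC and
          * its own nonce (16 bytes), and the third ACK carries a 160-bit
          * HMAC (24 bytes).
          */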
0119     case MPTCPOPT_MP_JOIN:
0120         mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
0121         if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
0122             mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
0123             mp_opt->join_id = *ptr++;
0124             mp_opt->token = get_unaligned_be32(ptr);
0125             ptr += 4;
0126             mp_opt->nonce = get_unaligned_be32(ptr);
0127             ptr += 4;
0128             pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
0129                  mp_opt->backup, mp_opt->join_id,
0130                  mp_opt->token, mp_opt->nonce);
0131         } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
0132             mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
0133             mp_opt->join_id = *ptr++;
0134             mp_opt->thmac = get_unaligned_be64(ptr);
0135             ptr += 8;
0136             mp_opt->nonce = get_unaligned_be32(ptr);
0137             ptr += 4;
0138             pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
0139                  mp_opt->backup, mp_opt->join_id,
0140                  mp_opt->thmac, mp_opt->nonce);
0141         } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
0142             ptr += 2;
0143             memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
0144             pr_debug("MP_JOIN hmac");
0145         } else {
0146             mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
0147         }
0148         break;
0149 
0150     case MPTCPOPT_DSS:
0151         pr_debug("DSS");
0152         ptr++;
0153 
0154         /* we must clear 'mpc_map' to be able to detect MP_CAPABLE
0155          * map vs DSS map in mptcp_incoming_options(), and reconstruct
0156          * map info accordingly
0157          */
0158         mp_opt->mpc_map = 0;
0159         flags = (*ptr++) & MPTCP_DSS_FLAG_MASK;
0160         mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0;
0161         mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0;
0162         mp_opt->use_map = (flags & MPTCP_DSS_HAS_MAP) != 0;
0163         mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
0164         mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
0165 
0166         pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
0167              mp_opt->data_fin, mp_opt->dsn64,
0168              mp_opt->use_map, mp_opt->ack64,
0169              mp_opt->use_ack);
0170 
0171         expected_opsize = TCPOLEN_MPTCP_DSS_BASE;
0172 
0173         if (mp_opt->use_ack) {
0174             if (mp_opt->ack64)
0175                 expected_opsize += TCPOLEN_MPTCP_DSS_ACK64;
0176             else
0177                 expected_opsize += TCPOLEN_MPTCP_DSS_ACK32;
0178         }
0179 
0180         if (mp_opt->use_map) {
0181             if (mp_opt->dsn64)
0182                 expected_opsize += TCPOLEN_MPTCP_DSS_MAP64;
0183             else
0184                 expected_opsize += TCPOLEN_MPTCP_DSS_MAP32;
0185         }
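             /* For illustration: a DSS carrying a 64-bit ack plus a 64-bit
              * mapping is 4 + 8 + 14 = 26 bytes (28 with the trailing
              * checksum), while an ack-only DSS with a 32-bit ack is just
              * 4 + 4 = 8 bytes.
              */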
0186 
0187         /* Always parse any csum presence combination; we will enforce the
0188          * RFC 8684 Section 3.3 checks later in subflow_data_ready
0189          */
0190         if (opsize != expected_opsize &&
0191             opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM)
0192             break;
0193 
0194         mp_opt->suboptions |= OPTION_MPTCP_DSS;
0195         if (mp_opt->use_ack) {
0196             if (mp_opt->ack64) {
0197                 mp_opt->data_ack = get_unaligned_be64(ptr);
0198                 ptr += 8;
0199             } else {
0200                 mp_opt->data_ack = get_unaligned_be32(ptr);
0201                 ptr += 4;
0202             }
0203 
0204             pr_debug("data_ack=%llu", mp_opt->data_ack);
0205         }
0206 
0207         if (mp_opt->use_map) {
0208             if (mp_opt->dsn64) {
0209                 mp_opt->data_seq = get_unaligned_be64(ptr);
0210                 ptr += 8;
0211             } else {
0212                 mp_opt->data_seq = get_unaligned_be32(ptr);
0213                 ptr += 4;
0214             }
0215 
0216             mp_opt->subflow_seq = get_unaligned_be32(ptr);
0217             ptr += 4;
0218 
0219             mp_opt->data_len = get_unaligned_be16(ptr);
0220             ptr += 2;
0221 
0222             if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
0223                 mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
0224                 mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
0225                 ptr += 2;
0226             }
0227 
0228             pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
0229                  mp_opt->data_seq, mp_opt->subflow_seq,
0230                  mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
0231                  mp_opt->csum);
0232         }
0233 
0234         break;
0235 
0236     case MPTCPOPT_ADD_ADDR:
0237         mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
0238         if (!mp_opt->echo) {
0239             if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
0240                 opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
0241                 mp_opt->addr.family = AF_INET;
0242 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
0243             else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
0244                  opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
0245                 mp_opt->addr.family = AF_INET6;
0246 #endif
0247             else
0248                 break;
0249         } else {
0250             if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
0251                 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
0252                 mp_opt->addr.family = AF_INET;
0253 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
0254             else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
0255                  opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
0256                 mp_opt->addr.family = AF_INET6;
0257 #endif
0258             else
0259                 break;
0260         }
0261 
0262         mp_opt->suboptions |= OPTION_MPTCP_ADD_ADDR;
0263         mp_opt->addr.id = *ptr++;
0264         mp_opt->addr.port = 0;
0265         mp_opt->ahmac = 0;
0266         if (mp_opt->addr.family == AF_INET) {
0267             memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4);
0268             ptr += 4;
0269             if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
0270                 opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
0271                 mp_opt->addr.port = htons(get_unaligned_be16(ptr));
0272                 ptr += 2;
0273             }
0274         }
0275 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
0276         else {
0277             memcpy(mp_opt->addr.addr6.s6_addr, (u8 *)ptr, 16);
0278             ptr += 16;
0279             if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
0280                 opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
0281                 mp_opt->addr.port = htons(get_unaligned_be16(ptr));
0282                 ptr += 2;
0283             }
0284         }
0285 #endif
0286         if (!mp_opt->echo) {
0287             mp_opt->ahmac = get_unaligned_be64(ptr);
0288             ptr += 8;
0289         }
0290         pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
0291              (mp_opt->addr.family == AF_INET6) ? "6" : "",
0292              mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
0293         break;
0294 
0295     case MPTCPOPT_RM_ADDR:
0296         if (opsize < TCPOLEN_MPTCP_RM_ADDR_BASE + 1 ||
0297             opsize > TCPOLEN_MPTCP_RM_ADDR_BASE + MPTCP_RM_IDS_MAX)
0298             break;
0299 
0300         ptr++;
0301 
0302         mp_opt->suboptions |= OPTION_MPTCP_RM_ADDR;
0303         mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
0304         for (i = 0; i < mp_opt->rm_list.nr; i++)
0305             mp_opt->rm_list.ids[i] = *ptr++;
0306         pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
0307         break;
0308 
0309     case MPTCPOPT_MP_PRIO:
0310         if (opsize != TCPOLEN_MPTCP_PRIO)
0311             break;
0312 
0313         mp_opt->suboptions |= OPTION_MPTCP_PRIO;
0314         mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
0315         pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
0316         break;
0317 
0318     case MPTCPOPT_MP_FASTCLOSE:
0319         if (opsize != TCPOLEN_MPTCP_FASTCLOSE)
0320             break;
0321 
0322         ptr += 2;
0323         mp_opt->rcvr_key = get_unaligned_be64(ptr);
0324         ptr += 8;
0325         mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
0326         pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
0327         break;
0328 
0329     case MPTCPOPT_RST:
0330         if (opsize != TCPOLEN_MPTCP_RST)
0331             break;
0332 
0333         if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
0334             break;
0335 
0336         mp_opt->suboptions |= OPTION_MPTCP_RST;
0337         flags = *ptr++;
0338         mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
0339         mp_opt->reset_reason = *ptr;
0340         pr_debug("MP_RST: transient=%u reason=%u",
0341              mp_opt->reset_transient, mp_opt->reset_reason);
0342         break;
0343 
0344     case MPTCPOPT_MP_FAIL:
0345         if (opsize != TCPOLEN_MPTCP_FAIL)
0346             break;
0347 
0348         ptr += 2;
0349         mp_opt->suboptions |= OPTION_MPTCP_FAIL;
0350         mp_opt->fail_seq = get_unaligned_be64(ptr);
0351         pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
0352         break;
0353 
0354     default:
0355         break;
0356     }
0357 }
0358 
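     /* Walk the TCP option list (kind/length encoded, cfr. RFC 793) and hand
      * every option of kind TCPOPT_MPTCP over to mptcp_parse_option(); any
      * other option kind is simply skipped here.
      */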
0359 void mptcp_get_options(const struct sk_buff *skb,
0360                struct mptcp_options_received *mp_opt)
0361 {
0362     const struct tcphdr *th = tcp_hdr(skb);
0363     const unsigned char *ptr;
0364     int length;
0365 
0366     /* initialize option status */
0367     mp_opt->suboptions = 0;
0368 
0369     length = (th->doff * 4) - sizeof(struct tcphdr);
0370     ptr = (const unsigned char *)(th + 1);
0371 
0372     while (length > 0) {
0373         int opcode = *ptr++;
0374         int opsize;
0375 
0376         switch (opcode) {
0377         case TCPOPT_EOL:
0378             return;
0379         case TCPOPT_NOP:    /* Ref: RFC 793 section 3.1 */
0380             length--;
0381             continue;
0382         default:
0383             if (length < 2)
0384                 return;
0385             opsize = *ptr++;
0386             if (opsize < 2) /* "silly options" */
0387                 return;
0388             if (opsize > length)
0389                 return; /* don't parse partial options */
0390             if (opcode == TCPOPT_MPTCP)
0391                 mptcp_parse_option(skb, ptr, opsize, mp_opt);
0392             ptr += opsize - 2;
0393             length -= opsize;
0394         }
0395     }
0396 }
0397 
0398 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
0399                unsigned int *size, struct mptcp_out_options *opts)
0400 {
0401     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0402 
0403     /* we will use snd_isn to detect first pkt [re]transmission
0404      * in mptcp_established_options_mp()
0405      */
0406     subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
0407     if (subflow->request_mptcp) {
0408         opts->suboptions = OPTION_MPTCP_MPC_SYN;
0409         opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk));
0410         opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
0411         *size = TCPOLEN_MPTCP_MPC_SYN;
0412         return true;
0413     } else if (subflow->request_join) {
0414         pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
0415              subflow->local_nonce);
0416         opts->suboptions = OPTION_MPTCP_MPJ_SYN;
0417         opts->join_id = subflow->local_id;
0418         opts->token = subflow->remote_token;
0419         opts->nonce = subflow->local_nonce;
0420         opts->backup = subflow->request_bkup;
0421         *size = TCPOLEN_MPTCP_MPJ_SYN;
0422         return true;
0423     }
0424     return false;
0425 }
0426 
0427 static void clear_3rdack_retransmission(struct sock *sk)
0428 {
0429     struct inet_connection_sock *icsk = inet_csk(sk);
0430 
0431     sk_stop_timer(sk, &icsk->icsk_delack_timer);
0432     icsk->icsk_ack.timeout = 0;
0433     icsk->icsk_ack.ato = 0;
0434     icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
0435 }
0436 
0437 static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
0438                      bool snd_data_fin_enable,
0439                      unsigned int *size,
0440                      unsigned int remaining,
0441                      struct mptcp_out_options *opts)
0442 {
0443     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0444     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0445     struct mptcp_ext *mpext;
0446     unsigned int data_len;
0447     u8 len;
0448 
0449     /* When skb is not available, we better over-estimate the emitted
0450      * options len. A full DSS option (28 bytes) is longer than
0451      * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
0452      * tell the caller to defer the estimate to
0453      * mptcp_established_options_dss(), which will reserve enough space.
0454      */
0455     if (!skb)
0456         return false;
0457 
0458     /* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
0459     if (subflow->fully_established || snd_data_fin_enable ||
0460         subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
0461         sk->sk_state != TCP_ESTABLISHED)
0462         return false;
0463 
0464     if (subflow->mp_capable) {
0465         mpext = mptcp_get_ext(skb);
0466         data_len = mpext ? mpext->data_len : 0;
0467 
0468         /* we will check ops->data_len in mptcp_write_options() to
0469          * discriminate between TCPOLEN_MPTCP_MPC_ACK_DATA and
0470          * TCPOLEN_MPTCP_MPC_ACK
0471          */
0472         opts->data_len = data_len;
0473         opts->suboptions = OPTION_MPTCP_MPC_ACK;
0474         opts->sndr_key = subflow->local_key;
0475         opts->rcvr_key = subflow->remote_key;
0476         opts->csum_reqd = READ_ONCE(msk->csum_enabled);
0477         opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));
0478 
0479         /* Section 3.1.
0480          * The MP_CAPABLE option is carried on the SYN, SYN/ACK, and ACK
0481          * packets that start the first subflow of an MPTCP connection,
0482          * as well as the first packet that carries data
0483          */
0484         if (data_len > 0) {
0485             len = TCPOLEN_MPTCP_MPC_ACK_DATA;
0486             if (opts->csum_reqd) {
0487                 /* we need to propagate more info to csum the pseudo hdr */
0488                 opts->data_seq = mpext->data_seq;
0489                 opts->subflow_seq = mpext->subflow_seq;
0490                 opts->csum = mpext->csum;
0491                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
0492             }
0493             *size = ALIGN(len, 4);
0494         } else {
0495             *size = TCPOLEN_MPTCP_MPC_ACK;
0496         }
0497 
0498         pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
0499              subflow, subflow->local_key, subflow->remote_key,
0500              data_len);
0501 
0502         return true;
0503     } else if (subflow->mp_join) {
0504         opts->suboptions = OPTION_MPTCP_MPJ_ACK;
0505         memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
0506         *size = TCPOLEN_MPTCP_MPJ_ACK;
0507         pr_debug("subflow=%p", subflow);
0508 
0509         /* we can use the full delegate action helper only from BH context.
0510          * If we are in process context - sk is flushing the backlog at
0511          * socket lock release time - just set the appropriate flag; it will
0512          * be handled by the release callback
0513          */
0514         if (sock_owned_by_user(sk))
0515             set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
0516         else
0517             mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
0518         return true;
0519     }
0520     return false;
0521 }
0522 
0523 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
0524                  struct sk_buff *skb, struct mptcp_ext *ext)
0525 {
0526     /* The write_seq value has already been incremented, so the actual
0527      * sequence number for the DATA_FIN is one less.
0528      */
0529     u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1;
0530 
0531     if (!ext->use_map || !skb->len) {
0532         /* RFC6824 requires a DSS mapping with specific values
0533          * if DATA_FIN is set but no data payload is mapped
0534          */
0535         ext->data_fin = 1;
0536         ext->use_map = 1;
0537         ext->dsn64 = 1;
0538         ext->data_seq = data_fin_tx_seq;
0539         ext->subflow_seq = 0;
0540         ext->data_len = 1;
0541     } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
0542         /* If there's an existing DSS mapping and it is the
0543          * final mapping, DATA_FIN consumes 1 additional byte of
0544          * mapping space.
0545          */
0546         ext->data_fin = 1;
0547         ext->data_len++;
0548     }
0549 }
0550 
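     /* Build the DSS suboption: the mapping half is copied from the skb's
      * MPTCP extension (when present), the ack half is derived from the
      * msk-level ack_seq; either half may be omitted, and the aligned total
      * reserved for the option is reported back through *size.
      */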
0551 static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
0552                       bool snd_data_fin_enable,
0553                       unsigned int *size,
0554                       unsigned int remaining,
0555                       struct mptcp_out_options *opts)
0556 {
0557     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0558     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0559     unsigned int dss_size = 0;
0560     struct mptcp_ext *mpext;
0561     unsigned int ack_size;
0562     bool ret = false;
0563     u64 ack_seq;
0564 
0565     opts->csum_reqd = READ_ONCE(msk->csum_enabled);
0566     mpext = skb ? mptcp_get_ext(skb) : NULL;
0567 
0568     if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
0569         unsigned int map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64;
0570 
0571         if (mpext) {
0572             if (opts->csum_reqd)
0573                 map_size += TCPOLEN_MPTCP_DSS_CHECKSUM;
0574 
0575             opts->ext_copy = *mpext;
0576         }
0577 
0578         remaining -= map_size;
0579         dss_size = map_size;
0580         if (skb && snd_data_fin_enable)
0581             mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
0582         opts->suboptions = OPTION_MPTCP_DSS;
0583         ret = true;
0584     }
0585 
0586     /* passive sockets msk will set the 'can_ack' after accept(), even
0587      * if the first subflow may already have the remote key handy
0588      */
0589     opts->ext_copy.use_ack = 0;
0590     if (!READ_ONCE(msk->can_ack)) {
0591         *size = ALIGN(dss_size, 4);
0592         return ret;
0593     }
0594 
0595     ack_seq = READ_ONCE(msk->ack_seq);
0596     if (READ_ONCE(msk->use_64bit_ack)) {
0597         ack_size = TCPOLEN_MPTCP_DSS_ACK64;
0598         opts->ext_copy.data_ack = ack_seq;
0599         opts->ext_copy.ack64 = 1;
0600     } else {
0601         ack_size = TCPOLEN_MPTCP_DSS_ACK32;
0602         opts->ext_copy.data_ack32 = (uint32_t)ack_seq;
0603         opts->ext_copy.ack64 = 0;
0604     }
0605     opts->ext_copy.use_ack = 1;
0606     opts->suboptions = OPTION_MPTCP_DSS;
0607     WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
0608 
0609     /* Add kind/length/subtype/flag overhead if mapping is not populated */
0610     if (dss_size == 0)
0611         ack_size += TCPOLEN_MPTCP_DSS_BASE;
0612 
0613     dss_size += ack_size;
0614 
0615     *size = ALIGN(dss_size, 4);
0616     return true;
0617 }
0618 
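     /* The ADD_ADDR HMAC covers a small message built below: the address id
      * (1 byte), the raw IPv4 (4 bytes) or IPv6 (16 bytes) address and the
      * 16-bit port in big-endian order; only the last 8 bytes of the
      * HMAC-SHA256 digest end up on the wire.
      */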
0619 static u64 add_addr_generate_hmac(u64 key1, u64 key2,
0620                   struct mptcp_addr_info *addr)
0621 {
0622     u16 port = ntohs(addr->port);
0623     u8 hmac[SHA256_DIGEST_SIZE];
0624     u8 msg[19];
0625     int i = 0;
0626 
0627     msg[i++] = addr->id;
0628     if (addr->family == AF_INET) {
0629         memcpy(&msg[i], &addr->addr.s_addr, 4);
0630         i += 4;
0631     }
0632 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
0633     else if (addr->family == AF_INET6) {
0634         memcpy(&msg[i], &addr->addr6.s6_addr, 16);
0635         i += 16;
0636     }
0637 #endif
0638     msg[i++] = port >> 8;
0639     msg[i++] = port & 0xFF;
0640 
0641     mptcp_crypto_hmac_sha(key1, key2, msg, i, hmac);
0642 
0643     return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
0644 }
0645 
0646 static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *skb,
0647                            unsigned int *size,
0648                            unsigned int remaining,
0649                            struct mptcp_out_options *opts)
0650 {
0651     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0652     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0653     bool drop_other_suboptions = false;
0654     unsigned int opt_size = *size;
0655     bool echo;
0656     int len;
0657 
0658     /* add addr will strip the existing options, be sure to avoid breaking
0659      * MPC/MPJ handshakes
0660      */
0661     if (!mptcp_pm_should_add_signal(msk) ||
0662         (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
0663         !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
0664             &echo, &drop_other_suboptions))
0665         return false;
0666 
0667     if (drop_other_suboptions)
0668         remaining += opt_size;
0669     len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
0670     if (remaining < len)
0671         return false;
0672 
0673     *size = len;
0674     if (drop_other_suboptions) {
0675         pr_debug("drop other suboptions");
0676         opts->suboptions = 0;
0677 
0678         /* note that e.g. DSS could have written into the memory
0679          * aliased by ahmac; we must reset the field here
0680          * to avoid appending the hmac even for ADD_ADDR echo
0681          * options
0682          */
0683         opts->ahmac = 0;
0684         *size -= opt_size;
0685     }
0686     opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
0687     if (!echo) {
0688         opts->ahmac = add_addr_generate_hmac(msk->local_key,
0689                              msk->remote_key,
0690                              &opts->addr);
0691     }
0692     pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
0693          opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
0694 
0695     return true;
0696 }
0697 
0698 static bool mptcp_established_options_rm_addr(struct sock *sk,
0699                           unsigned int *size,
0700                           unsigned int remaining,
0701                           struct mptcp_out_options *opts)
0702 {
0703     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0704     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0705     struct mptcp_rm_list rm_list;
0706     int i, len;
0707 
0708     if (!mptcp_pm_should_rm_signal(msk) ||
0709         !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_list)))
0710         return false;
0711 
0712     len = mptcp_rm_addr_len(&rm_list);
0713     if (len < 0)
0714         return false;
0715     if (remaining < len)
0716         return false;
0717 
0718     *size = len;
0719     opts->suboptions |= OPTION_MPTCP_RM_ADDR;
0720     opts->rm_list = rm_list;
0721 
0722     for (i = 0; i < opts->rm_list.nr; i++)
0723         pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
0724 
0725     return true;
0726 }
0727 
0728 static bool mptcp_established_options_mp_prio(struct sock *sk,
0729                           unsigned int *size,
0730                           unsigned int remaining,
0731                           struct mptcp_out_options *opts)
0732 {
0733     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0734 
0735     /* can't send MP_PRIO with MPC, as they share the same option space:
0736      * 'backup'. Also it makes no sense at all
0737      */
0738     if (!subflow->send_mp_prio || (opts->suboptions & OPTIONS_MPTCP_MPC))
0739         return false;
0740 
0741     /* account for the trailing 'nop' option */
0742     if (remaining < TCPOLEN_MPTCP_PRIO_ALIGN)
0743         return false;
0744 
0745     *size = TCPOLEN_MPTCP_PRIO_ALIGN;
0746     opts->suboptions |= OPTION_MPTCP_PRIO;
0747     opts->backup = subflow->request_bkup;
0748 
0749     pr_debug("prio=%d", opts->backup);
0750 
0751     return true;
0752 }
0753 
0754 static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_buff *skb,
0755                            unsigned int *size,
0756                            unsigned int remaining,
0757                            struct mptcp_out_options *opts)
0758 {
0759     const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0760 
0761     if (remaining < TCPOLEN_MPTCP_RST)
0762         return false;
0763 
0764     *size = TCPOLEN_MPTCP_RST;
0765     opts->suboptions |= OPTION_MPTCP_RST;
0766     opts->reset_transient = subflow->reset_transient;
0767     opts->reset_reason = subflow->reset_reason;
0768     MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTTX);
0769 
0770     return true;
0771 }
0772 
0773 static bool mptcp_established_options_fastclose(struct sock *sk,
0774                         unsigned int *size,
0775                         unsigned int remaining,
0776                         struct mptcp_out_options *opts)
0777 {
0778     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0779     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0780 
0781     if (likely(!subflow->send_fastclose))
0782         return false;
0783 
0784     if (remaining < TCPOLEN_MPTCP_FASTCLOSE)
0785         return false;
0786 
0787     *size = TCPOLEN_MPTCP_FASTCLOSE;
0788     opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
0789     opts->rcvr_key = msk->remote_key;
0790 
0791     pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
0792     MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
0793     return true;
0794 }
0795 
0796 static bool mptcp_established_options_mp_fail(struct sock *sk,
0797                           unsigned int *size,
0798                           unsigned int remaining,
0799                           struct mptcp_out_options *opts)
0800 {
0801     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0802 
0803     if (likely(!subflow->send_mp_fail))
0804         return false;
0805 
0806     if (remaining < TCPOLEN_MPTCP_FAIL)
0807         return false;
0808 
0809     *size = TCPOLEN_MPTCP_FAIL;
0810     opts->suboptions |= OPTION_MPTCP_FAIL;
0811     opts->fail_seq = subflow->map_seq;
0812 
0813     pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
0814     MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
0815 
0816     return true;
0817 }
0818 
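     /* Collect all MPTCP suboptions for an established subflow. Each helper
      * reports the bytes it consumed through its *size argument and the
      * caller shrinks 'remaining' accordingly; 'remaining' is the TCP option
      * space still available on this skb (TCP options are capped at 40 bytes
      * overall), so overrunning it would corrupt the header.
      */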
0819 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
0820                    unsigned int *size, unsigned int remaining,
0821                    struct mptcp_out_options *opts)
0822 {
0823     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
0824     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
0825     unsigned int opt_size = 0;
0826     bool snd_data_fin;
0827     bool ret = false;
0828 
0829     opts->suboptions = 0;
0830 
0831     if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
0832         return false;
0833 
0834     if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
0835         if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
0836             mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
0837             *size += opt_size;
0838             remaining -= opt_size;
0839         }
0840         /* MP_RST can be used with MP_FASTCLOSE and MP_FAIL if there is room */
0841         if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) {
0842             *size += opt_size;
0843             remaining -= opt_size;
0844         }
0845         return true;
0846     }
0847 
0848     snd_data_fin = mptcp_data_fin_enabled(msk);
0849     if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts))
0850         ret = true;
0851     else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) {
0852         unsigned int mp_fail_size;
0853 
0854         ret = true;
0855         if (mptcp_established_options_mp_fail(sk, &mp_fail_size,
0856                               remaining - opt_size, opts)) {
0857             *size += opt_size + mp_fail_size;
0858             remaining -= opt_size + mp_fail_size;
0859             return true;
0860         }
0861     }
0862 
0863     /* we reserved enough space for the above options, and exceeding the
0864      * TCP option space would be fatal
0865      */
0866     if (WARN_ON_ONCE(opt_size > remaining))
0867         return false;
0868 
0869     *size += opt_size;
0870     remaining -= opt_size;
0871     if (mptcp_established_options_add_addr(sk, skb, &opt_size, remaining, opts)) {
0872         *size += opt_size;
0873         remaining -= opt_size;
0874         ret = true;
0875     } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
0876         *size += opt_size;
0877         remaining -= opt_size;
0878         ret = true;
0879     }
0880 
0881     if (mptcp_established_options_mp_prio(sk, &opt_size, remaining, opts)) {
0882         *size += opt_size;
0883         remaining -= opt_size;
0884         ret = true;
0885     }
0886 
0887     return ret;
0888 }
0889 
0890 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
0891               struct mptcp_out_options *opts)
0892 {
0893     struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
0894 
0895     if (subflow_req->mp_capable) {
0896         opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
0897         opts->sndr_key = subflow_req->local_key;
0898         opts->csum_reqd = subflow_req->csum_reqd;
0899         opts->allow_join_id0 = subflow_req->allow_join_id0;
0900         *size = TCPOLEN_MPTCP_MPC_SYNACK;
0901         pr_debug("subflow_req=%p, local_key=%llu",
0902              subflow_req, subflow_req->local_key);
0903         return true;
0904     } else if (subflow_req->mp_join) {
0905         opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
0906         opts->backup = subflow_req->backup;
0907         opts->join_id = subflow_req->local_id;
0908         opts->thmac = subflow_req->thmac;
0909         opts->nonce = subflow_req->local_nonce;
0910         pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
0911              subflow_req, opts->backup, opts->join_id,
0912              opts->thmac, opts->nonce);
0913         *size = TCPOLEN_MPTCP_MPJ_SYNACK;
0914         return true;
0915     }
0916     return false;
0917 }
0918 
0919 static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
0920                     struct mptcp_subflow_context *subflow,
0921                     struct sk_buff *skb,
0922                     struct mptcp_options_received *mp_opt)
0923 {
0924     /* here we can process OoO, in-window pkts; only the in-sequence 4th ack
0925      * will make the subflow fully established
0926      */
0927     if (likely(subflow->fully_established)) {
0928         /* on passive sockets, check for 3rd ack retransmission
0929          * note that msk is always set by subflow_syn_recv_sock()
0930          * for mp_join subflows
0931          */
0932         if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
0933             TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
0934             subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) &&
0935             !subflow->request_join)
0936             tcp_send_ack(ssk);
0937         goto fully_established;
0938     }
0939 
0940     /* we must process OoO packets before the first subflow is fully
0941      * established. OoO packets are instead a protocol violation
0942      * for MP_JOIN subflows as the peer must not send any data
0943      * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
0944      */
0945     if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
0946         if (subflow->mp_join)
0947             goto reset;
0948         return subflow->mp_capable;
0949     }
0950 
0951     if (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) ||
0952         ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo)) {
0953         /* subflows are fully established as soon as we get any
0954          * additional ack, including ADD_ADDR.
0955          */
0956         subflow->fully_established = 1;
0957         WRITE_ONCE(msk->fully_established, true);
0958         goto fully_established;
0959     }
0960 
0961     /* If the first established packet does not contain MP_CAPABLE + data
0962      * then fall back to TCP. Fallback scenarios require a reset for
0963      * MP_JOIN subflows.
0964      */
0965     if (!(mp_opt->suboptions & OPTIONS_MPTCP_MPC)) {
0966         if (subflow->mp_join)
0967             goto reset;
0968         subflow->mp_capable = 0;
0969         pr_fallback(msk);
0970         mptcp_do_fallback(ssk);
0971         return false;
0972     }
0973 
0974     if (mp_opt->deny_join_id0)
0975         WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
0976 
0977     if (unlikely(!READ_ONCE(msk->pm.server_side)))
0978         pr_warn_once("bogus mpc option on established client sk");
0979     mptcp_subflow_fully_established(subflow, mp_opt);
0980 
0981 fully_established:
0982     /* if the subflow is not already linked into the conn_list, we can't
0983      * notify the PM: this subflow is still on the listener queue
0984      * and the PM possibly acquiring the subflow lock could race with
0985      * the listener close
0986      */
0987     if (likely(subflow->pm_notified) || list_empty(&subflow->node))
0988         return true;
0989 
0990     subflow->pm_notified = 1;
0991     if (subflow->mp_join) {
0992         clear_3rdack_retransmission(ssk);
0993         mptcp_pm_subflow_established(msk);
0994     } else {
0995         mptcp_pm_fully_established(msk, ssk, GFP_ATOMIC);
0996     }
0997     return true;
0998 
0999 reset:
1000     mptcp_subflow_reset(ssk);
1001     return false;
1002 }
1003 
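     /* Expand a possibly 32-bit sequence number to 64 bits using the last
      * known 64-bit value. Worked example: with old_seq 0x00000001fffffff0
      * and cur_seq 0x00000010, carrying over the upper half gives
      * 0x0000000100000010; since the lower 32 bits wrapped forward, the
      * result is bumped by 2^32 to 0x0000000200000010.
      */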
1004 u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
1005 {
1006     u32 old_seq32, cur_seq32;
1007 
1008     old_seq32 = (u32)old_seq;
1009     cur_seq32 = (u32)cur_seq;
1010     cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;
1011     if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32)))
1012         return cur_seq + (1LL << 32);
1013 
1014     /* reverse wrap could happen, too */
1015     if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32)))
1016         return cur_seq - (1LL << 32);
1017     return cur_seq;
1018 }
1019 
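     /* Propagate a DSS data_ack to the connection-level state: advance
      * snd_una and the send window right edge, and wake the push machinery
      * when new space opens up - the MPTCP-level counterpart of what
      * tcp_ack() does for each subflow.
      */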
1020 static void ack_update_msk(struct mptcp_sock *msk,
1021                struct sock *ssk,
1022                struct mptcp_options_received *mp_opt)
1023 {
1024     u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
1025     struct sock *sk = (struct sock *)msk;
1026     u64 old_snd_una;
1027 
1028     mptcp_data_lock(sk);
1029 
1030     /* avoid ack expansion on update conflict, to reduce the risk of
1031      * wrongly expanding to a future ack sequence number, which is way
1032      * more dangerous than missing an ack
1033      */
1034     old_snd_una = msk->snd_una;
1035     new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
1036 
1037     /* ACK for data not even sent yet? Ignore.*/
1038     if (unlikely(after64(new_snd_una, snd_nxt)))
1039         new_snd_una = old_snd_una;
1040 
1041     new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
1042 
1043     if (after64(new_wnd_end, msk->wnd_end))
1044         msk->wnd_end = new_wnd_end;
1045 
1046     /* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
1047     if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
1048         __mptcp_check_push(sk, ssk);
1049 
1050     if (after64(new_snd_una, old_snd_una)) {
1051         msk->snd_una = new_snd_una;
1052         __mptcp_data_acked(sk);
1053     }
1054     mptcp_data_unlock(sk);
1055 
1056     trace_ack_update_msk(mp_opt->data_ack,
1057                  old_snd_una, new_snd_una,
1058                  new_wnd_end, msk->wnd_end);
1059 }
1060 
1061 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
1062 {
1063     /* Skip if DATA_FIN was already received.
1064      * If updating simultaneously with the recvmsg loop, values
1065      * should match. If they mismatch, the peer is misbehaving and
1066      * we will prefer the most recent information.
1067      */
1068     if (READ_ONCE(msk->rcv_data_fin))
1069         return false;
1070 
1071     WRITE_ONCE(msk->rcv_data_fin_seq,
1072            mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
1073     WRITE_ONCE(msk->rcv_data_fin, 1);
1074 
1075     return true;
1076 }
1077 
1078 static bool add_addr_hmac_valid(struct mptcp_sock *msk,
1079                 struct mptcp_options_received *mp_opt)
1080 {
1081     u64 hmac = 0;
1082 
1083     if (mp_opt->echo)
1084         return true;
1085 
1086     hmac = add_addr_generate_hmac(msk->remote_key,
1087                       msk->local_key,
1088                       &mp_opt->addr);
1089 
1090     pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
1091          msk, hmac, mp_opt->ahmac);
1092 
1093     return hmac == mp_opt->ahmac;
1094 }
1095 
1096 /* Return false if a subflow has been reset, else return true */
1097 bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
1098 {
1099     struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1100     struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1101     struct mptcp_options_received mp_opt;
1102     struct mptcp_ext *mpext;
1103 
1104     if (__mptcp_check_fallback(msk)) {
1105         /* Keep it simple and unconditionally trigger send data cleanup and
1106          * pending queue spooling. We will need to acquire the data lock
1107          * for more accurate checks, and once the lock is acquired, such
1108          * helpers are cheap.
1109          */
1110         mptcp_data_lock(subflow->conn);
1111         if (sk_stream_memory_free(sk))
1112             __mptcp_check_push(subflow->conn, sk);
1113         __mptcp_data_acked(subflow->conn);
1114         mptcp_data_unlock(subflow->conn);
1115         return true;
1116     }
1117 
1118     mptcp_get_options(skb, &mp_opt);
1119 
1120     /* The subflow can be in close state only if check_fully_established()
1121      * just sent a reset. If so, tell the caller to ignore the current packet.
1122      */
1123     if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
1124         return sk->sk_state != TCP_CLOSE;
1125 
1126     if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
1127         if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
1128             msk->local_key == mp_opt.rcvr_key) {
1129             WRITE_ONCE(msk->rcv_fastclose, true);
1130             mptcp_schedule_work((struct sock *)msk);
1131             MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
1132         }
1133 
1134         if ((mp_opt.suboptions & OPTION_MPTCP_ADD_ADDR) &&
1135             add_addr_hmac_valid(msk, &mp_opt)) {
1136             if (!mp_opt.echo) {
1137                 mptcp_pm_add_addr_received(sk, &mp_opt.addr);
1138                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
1139             } else {
1140                 mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
1141                 mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
1142                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
1143             }
1144 
1145             if (mp_opt.addr.port)
1146                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD);
1147         }
1148 
1149         if (mp_opt.suboptions & OPTION_MPTCP_RM_ADDR)
1150             mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list);
1151 
1152         if (mp_opt.suboptions & OPTION_MPTCP_PRIO) {
1153             mptcp_pm_mp_prio_received(sk, mp_opt.backup);
1154             MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX);
1155         }
1156 
1157         if (mp_opt.suboptions & OPTION_MPTCP_FAIL) {
1158             mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq);
1159             MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX);
1160         }
1161 
1162         if (mp_opt.suboptions & OPTION_MPTCP_RST) {
1163             subflow->reset_seen = 1;
1164             subflow->reset_reason = mp_opt.reset_reason;
1165             subflow->reset_transient = mp_opt.reset_transient;
1166             MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTRX);
1167         }
1168 
1169         if (!(mp_opt.suboptions & OPTION_MPTCP_DSS))
1170             return true;
1171     }
1172 
1173     /* we can't wait for recvmsg() to update the ack_seq, otherwise
1174      * monodirectional flows will get stuck
1175      */
1176     if (mp_opt.use_ack)
1177         ack_update_msk(msk, sk, &mp_opt);
1178 
1179     /* Zero-data-length packets are dropped by the caller and not
1180      * propagated to the MPTCP layer, so the skb extension does not
1181      * need to be allocated or populated. DATA_FIN information, if
1182      * present, needs to be updated here before the skb is freed.
1183      */
1184     if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
1185         if (mp_opt.data_fin && mp_opt.data_len == 1 &&
1186             mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
1187             schedule_work(&msk->work))
1188             sock_hold(subflow->conn);
1189 
1190         return true;
1191     }
1192 
1193     mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
1194     if (!mpext)
1195         return true;
1196 
1197     memset(mpext, 0, sizeof(*mpext));
1198 
1199     if (likely(mp_opt.use_map)) {
1200         if (mp_opt.mpc_map) {
1201             /* this is an MP_CAPABLE carrying MPTCP data;
1202              * we know this maps the first chunk of data
1203              */
1204             mptcp_crypto_key_sha(subflow->remote_key, NULL,
1205                          &mpext->data_seq);
1206             mpext->data_seq++;
1207             mpext->subflow_seq = 1;
1208             mpext->dsn64 = 1;
1209             mpext->mpc_map = 1;
1210             mpext->data_fin = 0;
1211         } else {
1212             mpext->data_seq = mp_opt.data_seq;
1213             mpext->subflow_seq = mp_opt.subflow_seq;
1214             mpext->dsn64 = mp_opt.dsn64;
1215             mpext->data_fin = mp_opt.data_fin;
1216         }
1217         mpext->data_len = mp_opt.data_len;
1218         mpext->use_map = 1;
1219         mpext->csum_reqd = !!(mp_opt.suboptions & OPTION_MPTCP_CSUMREQD);
1220 
1221         if (mpext->csum_reqd)
1222             mpext->csum = mp_opt.csum;
1223     }
1224 
1225     return true;
1226 }
1227 
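     /* MPTCP advertises a single connection-level receive window shared by
      * all subflows. Ensure the edge written into this TCP header never
      * regresses below the highest value already announced on any subflow,
      * which is what the rcv_wnd_sent cmpxchg loop below tracks.
      */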
1228 static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
1229 {
1230     const struct sock *ssk = (const struct sock *)tp;
1231     struct mptcp_subflow_context *subflow;
1232     u64 ack_seq, rcv_wnd_old, rcv_wnd_new;
1233     struct mptcp_sock *msk;
1234     u32 new_win;
1235     u64 win;
1236 
1237     subflow = mptcp_subflow_ctx(ssk);
1238     msk = mptcp_sk(subflow->conn);
1239 
1240     ack_seq = READ_ONCE(msk->ack_seq);
1241     rcv_wnd_new = ack_seq + tp->rcv_wnd;
1242 
1243     rcv_wnd_old = atomic64_read(&msk->rcv_wnd_sent);
1244     if (after64(rcv_wnd_new, rcv_wnd_old)) {
1245         u64 rcv_wnd;
1246 
1247         for (;;) {
1248             rcv_wnd = atomic64_cmpxchg(&msk->rcv_wnd_sent, rcv_wnd_old, rcv_wnd_new);
1249 
1250             if (rcv_wnd == rcv_wnd_old)
1251                 break;
1252             if (before64(rcv_wnd_new, rcv_wnd)) {
1253                 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICTUPDATE);
1254                 goto raise_win;
1255             }
1256             MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
1257             rcv_wnd_old = rcv_wnd;
1258         }
1259         return;
1260     }
1261 
1262     if (rcv_wnd_new != rcv_wnd_old) {
1263 raise_win:
1264         win = rcv_wnd_old - ack_seq;
1265         tp->rcv_wnd = min_t(u64, win, U32_MAX);
1266         new_win = tp->rcv_wnd;
1267 
1268         /* Make sure we do not exceed the maximum possible
1269          * scaled window.
1270          */
1271         if (unlikely(th->syn))
1272             new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
1273         if (!tp->rx_opt.rcv_wscale &&
1274             READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
1275             new_win = min(new_win, MAX_TCP_WINDOW);
1276         else
1277             new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
1278 
1279         /* RFC1323 scaling applied */
1280         new_win >>= tp->rx_opt.rcv_wscale;
1281         th->window = htons(new_win);
1282         MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
1283     }
1284 }
1285 
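     /* The DSS checksum is a standard Internet (one's complement) checksum
      * over the data plus a 16-byte pseudo-header: 64-bit data sequence
      * number, 32-bit subflow sequence number, 16-bit data-level length and
      * a zeroed checksum field.
      */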
1286 __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
1287 {
1288     struct csum_pseudo_header header;
1289     __wsum csum;
1290 
1291     /* cfr RFC 8684 3.3.1.:
1292      * the data sequence number used in the pseudo-header is
1293      * always the 64-bit value, irrespective of what length is used in the
1294      * DSS option itself.
1295      */
1296     header.data_seq = cpu_to_be64(data_seq);
1297     header.subflow_seq = htonl(subflow_seq);
1298     header.data_len = htons(data_len);
1299     header.csum = 0;
1300 
1301     csum = csum_partial(&header, sizeof(header), sum);
1302     return csum_fold(csum);
1303 }
1304 
1305 static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
1306 {
1307     return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
1308                  ~csum_unfold(mpext->csum));
1309 }
1310 
1311 static void put_len_csum(u16 len, __sum16 csum, void *data)
1312 {
1313     __sum16 *sumptr = data + 2;
1314     __be16 *ptr = data;
1315 
1316     put_unaligned_be16(len, ptr);
1317 
1318     put_unaligned(csum, sumptr);
1319 }
1320 
1321 void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
1322              struct mptcp_out_options *opts)
1323 {
1324     const struct sock *ssk = (const struct sock *)tp;
1325     struct mptcp_subflow_context *subflow;
1326 
1327     /* Which options can be used together?
1328      *
1329      * X: mutually exclusive
1330      * O: often used together
1331      * C: can be used together in some cases
1332      * P: could be used together but we prefer not to (optimisations)
1333      *
1334      *  Opt: | MPC  | MPJ  | DSS  | ADD  |  RM  | PRIO | FAIL |  FC  |
1335      * ------|------|------|------|------|------|------|------|------|
1336      *  MPC  |------|------|------|------|------|------|------|------|
1337      *  MPJ  |  X   |------|------|------|------|------|------|------|
1338      *  DSS  |  X   |  X   |------|------|------|------|------|------|
1339      *  ADD  |  X   |  X   |  P   |------|------|------|------|------|
1340      *  RM   |  C   |  C   |  C   |  P   |------|------|------|------|
1341      *  PRIO |  X   |  C   |  C   |  C   |  C   |------|------|------|
1342      *  FAIL |  X   |  X   |  C   |  X   |  X   |  X   |------|------|
1343      *  FC   |  X   |  X   |  X   |  X   |  X   |  X   |  X   |------|
1344      *  RST  |  X   |  X   |  X   |  X   |  X   |  X   |  O   |  O   |
1345      * ------|------|------|------|------|------|------|------|------|
1346      *
1347      * The same applies in mptcp_established_options() function.
1348      */
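         /* Note: mptcp_option() (cfr. protocol.h) packs kind/length/subtype
          * and a flags byte into a single __be32 header word, and 'ptr' is a
          * __be32 pointer, so e.g. 'ptr += 2' advances 8 bytes.
          */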
1349     if (likely(OPTION_MPTCP_DSS & opts->suboptions)) {
1350         struct mptcp_ext *mpext = &opts->ext_copy;
1351         u8 len = TCPOLEN_MPTCP_DSS_BASE;
1352         u8 flags = 0;
1353 
1354         if (mpext->use_ack) {
1355             flags = MPTCP_DSS_HAS_ACK;
1356             if (mpext->ack64) {
1357                 len += TCPOLEN_MPTCP_DSS_ACK64;
1358                 flags |= MPTCP_DSS_ACK64;
1359             } else {
1360                 len += TCPOLEN_MPTCP_DSS_ACK32;
1361             }
1362         }
1363 
1364         if (mpext->use_map) {
1365             len += TCPOLEN_MPTCP_DSS_MAP64;
1366 
1367             /* Use only 64-bit mapping flags for now, add
1368              * support for optional 32-bit mappings later.
1369              */
1370             flags |= MPTCP_DSS_HAS_MAP | MPTCP_DSS_DSN64;
1371             if (mpext->data_fin)
1372                 flags |= MPTCP_DSS_DATA_FIN;
1373 
1374             if (opts->csum_reqd)
1375                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
1376         }
1377 
1378         *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);
1379 
1380         if (mpext->use_ack) {
1381             if (mpext->ack64) {
1382                 put_unaligned_be64(mpext->data_ack, ptr);
1383                 ptr += 2;
1384             } else {
1385                 put_unaligned_be32(mpext->data_ack32, ptr);
1386                 ptr += 1;
1387             }
1388         }
1389 
1390         if (mpext->use_map) {
1391             put_unaligned_be64(mpext->data_seq, ptr);
1392             ptr += 2;
1393             put_unaligned_be32(mpext->subflow_seq, ptr);
1394             ptr += 1;
1395             if (opts->csum_reqd) {
1396                 /* data_len == 0 is reserved for the infinite mapping,
1397                  * the checksum will also be set to 0.
1398                  */
1399                 put_len_csum(mpext->data_len,
1400                          (mpext->data_len ? mptcp_make_csum(mpext) : 0),
1401                          ptr);
1402             } else {
1403                 put_unaligned_be32(mpext->data_len << 16 |
1404                            TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
1405             }
1406             ptr += 1;
1407         }
1408 
1409         /* We might need to add MP_FAIL options in rare cases */
1410         if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions))
1411             goto mp_fail;
1412     } else if (OPTIONS_MPTCP_MPC & opts->suboptions) {
1413         u8 len, flag = MPTCP_CAP_HMAC_SHA256;
1414 
1415         if (OPTION_MPTCP_MPC_SYN & opts->suboptions) {
1416             len = TCPOLEN_MPTCP_MPC_SYN;
1417         } else if (OPTION_MPTCP_MPC_SYNACK & opts->suboptions) {
1418             len = TCPOLEN_MPTCP_MPC_SYNACK;
1419         } else if (opts->data_len) {
1420             len = TCPOLEN_MPTCP_MPC_ACK_DATA;
1421             if (opts->csum_reqd)
1422                 len += TCPOLEN_MPTCP_DSS_CHECKSUM;
1423         } else {
1424             len = TCPOLEN_MPTCP_MPC_ACK;
1425         }
1426 
1427         if (opts->csum_reqd)
1428             flag |= MPTCP_CAP_CHECKSUM_REQD;
1429 
1430         if (!opts->allow_join_id0)
1431             flag |= MPTCP_CAP_DENY_JOIN_ID0;
1432 
1433         *ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
1434                       MPTCP_SUPPORTED_VERSION,
1435                       flag);
1436 
1437         if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
1438             opts->suboptions))
1439             goto mp_capable_done;
1440 
1441         put_unaligned_be64(opts->sndr_key, ptr);
1442         ptr += 2;
1443         if (!((OPTION_MPTCP_MPC_ACK) & opts->suboptions))
1444             goto mp_capable_done;
1445 
1446         put_unaligned_be64(opts->rcvr_key, ptr);
1447         ptr += 2;
1448         if (!opts->data_len)
1449             goto mp_capable_done;
1450 
1451         if (opts->csum_reqd) {
1452             put_len_csum(opts->data_len,
1453                      __mptcp_make_csum(opts->data_seq,
1454                                opts->subflow_seq,
1455                                opts->data_len,
1456                                ~csum_unfold(opts->csum)),
1457                      ptr);
1458         } else {
1459             put_unaligned_be32(opts->data_len << 16 |
1460                        TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
1461         }
1462         ptr += 1;
1463 
1464         /* MPC is additionally mutually exclusive with MP_PRIO */
1465         goto mp_capable_done;
1466     } else if (OPTIONS_MPTCP_MPJ & opts->suboptions) {
1467         if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
1468             *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1469                           TCPOLEN_MPTCP_MPJ_SYN,
1470                           opts->backup, opts->join_id);
1471             put_unaligned_be32(opts->token, ptr);
1472             ptr += 1;
1473             put_unaligned_be32(opts->nonce, ptr);
1474             ptr += 1;
1475         } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
1476             *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1477                           TCPOLEN_MPTCP_MPJ_SYNACK,
1478                           opts->backup, opts->join_id);
1479             put_unaligned_be64(opts->thmac, ptr);
1480             ptr += 2;
1481             put_unaligned_be32(opts->nonce, ptr);
1482             ptr += 1;
1483         } else {
1484             *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
1485                           TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
1486             memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
1487             ptr += 5;
1488         }
1489     } else if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
1490         u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
1491         u8 echo = MPTCP_ADDR_ECHO;
1492 
1493 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1494         if (opts->addr.family == AF_INET6)
1495             len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
1496 #endif
1497 
1498         if (opts->addr.port)
1499             len += TCPOLEN_MPTCP_PORT_LEN;
1500 
1501         if (opts->ahmac) {
1502             len += sizeof(opts->ahmac);
1503             echo = 0;
1504         }
1505 
1506         *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
1507                       len, echo, opts->addr.id);
1508         if (opts->addr.family == AF_INET) {
1509             memcpy((u8 *)ptr, (u8 *)&opts->addr.addr.s_addr, 4);
1510             ptr += 1;
1511         }
1512 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1513         else if (opts->addr.family == AF_INET6) {
1514             memcpy((u8 *)ptr, opts->addr.addr6.s6_addr, 16);
1515             ptr += 4;
1516         }
1517 #endif
1518 
1519         if (!opts->addr.port) {
1520             if (opts->ahmac) {
1521                 put_unaligned_be64(opts->ahmac, ptr);
1522                 ptr += 2;
1523             }
1524         } else {
1525             u16 port = ntohs(opts->addr.port);
1526 
1527             if (opts->ahmac) {
1528                 u8 *bptr = (u8 *)ptr;
1529 
1530                 put_unaligned_be16(port, bptr);
1531                 bptr += 2;
1532                 put_unaligned_be64(opts->ahmac, bptr);
1533                 bptr += 8;
1534                 put_unaligned_be16(TCPOPT_NOP << 8 |
1535                            TCPOPT_NOP, bptr);
1536 
1537                 ptr += 3;
1538             } else {
1539                 put_unaligned_be32(port << 16 |
1540                            TCPOPT_NOP << 8 |
1541                            TCPOPT_NOP, ptr);
1542                 ptr += 1;
1543             }
1544         }
1545     } else if (unlikely(OPTION_MPTCP_FASTCLOSE & opts->suboptions)) {
1546         /* FASTCLOSE is mutually exclusive with others except RST */
1547         *ptr++ = mptcp_option(MPTCPOPT_MP_FASTCLOSE,
1548                       TCPOLEN_MPTCP_FASTCLOSE,
1549                       0, 0);
1550         put_unaligned_be64(opts->rcvr_key, ptr);
1551         ptr += 2;
1552 
1553         if (OPTION_MPTCP_RST & opts->suboptions)
1554             goto mp_rst;
1555         return;
1556     } else if (unlikely(OPTION_MPTCP_FAIL & opts->suboptions)) {
1557 mp_fail:
1558         /* MP_FAIL is mutually exclusive with others except RST */
1559         subflow = mptcp_subflow_ctx(ssk);
1560         subflow->send_mp_fail = 0;
1561 
1562         *ptr++ = mptcp_option(MPTCPOPT_MP_FAIL,
1563                       TCPOLEN_MPTCP_FAIL,
1564                       0, 0);
1565         put_unaligned_be64(opts->fail_seq, ptr);
1566         ptr += 2;
1567 
1568         if (OPTION_MPTCP_RST & opts->suboptions)
1569             goto mp_rst;
1570         return;
1571     } else if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) {
1572 mp_rst:
1573         *ptr++ = mptcp_option(MPTCPOPT_RST,
1574                       TCPOLEN_MPTCP_RST,
1575                       opts->reset_transient,
1576                       opts->reset_reason);
1577         return;
1578     }
1579 
1580     if (OPTION_MPTCP_PRIO & opts->suboptions) {
1581         subflow = mptcp_subflow_ctx(ssk);
1582         subflow->send_mp_prio = 0;
1583 
1584         *ptr++ = mptcp_option(MPTCPOPT_MP_PRIO,
1585                       TCPOLEN_MPTCP_PRIO,
1586                       opts->backup, TCPOPT_NOP);
1587 
1588         MPTCP_INC_STATS(sock_net((const struct sock *)tp),
1589                 MPTCP_MIB_MPPRIOTX);
1590     }
1591 
1592 mp_capable_done:
1593     if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
1594         u8 i = 1;
1595 
1596         *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
1597                       TCPOLEN_MPTCP_RM_ADDR_BASE + opts->rm_list.nr,
1598                       0, opts->rm_list.ids[0]);
1599 
1600         while (i < opts->rm_list.nr) {
1601             u8 id1, id2, id3, id4;
1602 
1603             id1 = opts->rm_list.ids[i];
1604             id2 = i + 1 < opts->rm_list.nr ? opts->rm_list.ids[i + 1] : TCPOPT_NOP;
1605             id3 = i + 2 < opts->rm_list.nr ? opts->rm_list.ids[i + 2] : TCPOPT_NOP;
1606             id4 = i + 3 < opts->rm_list.nr ? opts->rm_list.ids[i + 3] : TCPOPT_NOP;
1607             put_unaligned_be32(id1 << 24 | id2 << 16 | id3 << 8 | id4, ptr);
1608             ptr += 1;
1609             i += 4;
1610         }
1611     }
1612 
1613     if (tp)
1614         mptcp_set_rwin(tp, th);
1615 }
1616 
1617 __be32 mptcp_get_reset_option(const struct sk_buff *skb)
1618 {
1619     const struct mptcp_ext *ext = mptcp_get_ext(skb);
1620     u8 flags, reason;
1621 
1622     if (ext) {
1623         flags = ext->reset_transient;
1624         reason = ext->reset_reason;
1625 
1626         return mptcp_option(MPTCPOPT_RST, TCPOLEN_MPTCP_RST,
1627                     flags, reason);
1628     }
1629 
1630     return htonl(0u);
1631 }
1632 EXPORT_SYMBOL_GPL(mptcp_get_reset_option);