// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u64 map_seq;
	u64 end_seq;
	u32 offset;
	u8  has_rxtstamp:1;
};

#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

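/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */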
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

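/* Returns end sequence number of the receiver's advertised window */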
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
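		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */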
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;

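	/* This is the first subflow, always with id 0 */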
	subflow->local_id_valid = 1;
	mptcp_sock_graft(msk->first, sk->sk_socket);

	return 0;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

static void mptcp_rmem_charge(struct sock *sk, int size)
{
	mptcp_sk(sk)->rmem_fwd_alloc -= size;
}

static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;

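	/* note the fwd memory can reach a negative value after accounting
	 * for the delta, but the later skb free will restore a non
	 * negative one
	 */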
	atomic_add(delta, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);

	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
	amount >>= PAGE_SHIFT;
	mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}

static void mptcp_rmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int reclaimable;

	msk->rmem_fwd_alloc += size;
	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);

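	/* see sk_mem_uncharge() for the rationale behind the threshold */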
	if (unlikely(reclaimable >= PAGE_SIZE))
		__mptcp_rmem_reclaim(sk, reclaimable);
}

static void mptcp_rfree(struct sk_buff *skb)
{
	unsigned int len = skb->truesize;
	struct sock *sk = skb->sk;

	atomic_sub(len, &sk->sk_rmem_alloc);
	mptcp_rmem_uncharge(sk, len);
}

static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = mptcp_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, skb->truesize);
}

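/* insert skb into the msk out-of-order RB-tree: data past the current
 * ack_seq waits here until the in-sequence gap is filled
 */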
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = atomic64_read(&msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
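		/* out of window */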
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

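	/* with 2 subflows, adding at end of ooo queue is quite likely
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */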
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

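	/* can the skb be appended to the tail of the ooo queue? */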
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

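	/* find the insertion point, handling overlaps on the way */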
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
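				/* skb is fully covered by skb1: drop it */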
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
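				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */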
			} else {
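				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */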
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
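	/* insert segment into the RB tree */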
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
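	/* remove other segments covered by skb */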
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
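	/* if there is no skb after us, we are the new ooo tail */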
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	mptcp_set_owner_r(skb, sk);
}

static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int amt, amount;

	if (size <= msk->rmem_fwd_alloc)
		return true;

	size -= msk->rmem_fwd_alloc;
	amt = sk_mem_pages(size);
	amount = amt << PAGE_SHIFT;
	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
		return false;

	msk->rmem_fwd_alloc += amount;
	return true;
}

static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;
	bool has_rxtstamp;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);

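	/* try to fetch required memory from subflow */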
	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
		goto drop;

	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

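	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */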
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
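		/* in sequence */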
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		mptcp_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

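	/* old data, keep it simple and drop the whole pkt, sender
	 * will retransmit as needed, if needed
	 */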
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
drop:
	mptcp_drop(sk, skb);
	return false;
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}

static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return !__mptcp_check_fallback(msk) &&
	       ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

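	/* Look for an acknowledged DATA_FIN */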
	if (mptcp_pending_data_fin_ack(sk)) {
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		}

		mptcp_close_wake_up(sk);
	}
}

static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

static void mptcp_set_datafin_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 retransmits;

	retransmits = min_t(u32, icsk->icsk_retransmits,
			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));

	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}

static inline bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}

void __mptcp_subflow_send_ack(struct sock *ssk)
{
	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);
}

static void mptcp_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, 1);
	unlock_sock_fast(ssk, slow);
}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1));
	rx_empty = !__mptcp_rmem(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk);
	}
}

static bool mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;
	bool ret = false;

	if (__mptcp_check_fallback(msk))
		return ret;

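	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk was not yet updated.
	 *
	 * When we switch to the msk state, we need to update the msk
	 * socket state, send the ack, and wake up any process blocked
	 * on the socket shutdown.
	 */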
	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
		smp_mb__before_atomic();

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		default:
			WARN_ON_ONCE(1);
			break;
		}

		ret = true;
		mptcp_send_ack(msk);
		mptcp_close_wake_up(sk);
	}
	return ret;
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;
	int sk_rbuf;

	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);

		if (unlikely(ssk_rbuf > sk_rbuf)) {
			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
			sk_rbuf = ssk_rbuf;
		}
	}

	pr_debug("msk=%p ssk=%p", msk, ssk);
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

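		/* try to move as much data as available */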
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb) {
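			/* With racing move_skbs_to_msk() and __mptcp_move_skbs(),
			 * a different CPU can have already processed the pending
			 * data, stop here or we can enter an infinite loop
			 */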
			if (!moved)
				done = true;
			break;
		}

		if (__mptcp_check_fallback(msk)) {
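			/* under fallback skbs have no MPTCP-level mapping: TCP
			 * may have collapsed them in the meantime, so use the
			 * whole skb as a single mapping
			 */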
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
				moved += len;
			seq += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes += moved;
	return done;
}

static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			MPTCP_SKB_CB(skb)->map_seq += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->ack_seq = end_seq;
		moved = true;
	}
	return moved;
}

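/* In most cases we will be able to lock the msk socket.  If its already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */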
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err)) {
		if (!sock_owned_by_user(sk))
			__mptcp_error_report(sk);
		else
			__set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
	}

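	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */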
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);
	int sk_rbuf, ssk_rbuf;

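	/* The peer can send data while we are shutting down this
	 * subflow at msk destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */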
	if (unlikely(subflow->disposable))
		return;

	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
	if (unlikely(ssk_rbuf > sk_rbuf))
		sk_rbuf = ssk_rbuf;

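	/* over limit? can't append more skbs to msk, also no need to wake-up */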
	if (__mptcp_rmem(sk) > sk_rbuf) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
		return;
	}

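	/* Wake-up the reader only for in-sequence data */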
	mptcp_data_lock(sk);
	if (move_skbs_to_msk(msk, ssk))
		sk->sk_data_ready(sk);

	mptcp_data_unlock(sk);
}

static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return false;

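	/* attach to msk socket only after we are sure we will deal with it
	 * at close time
	 */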
	if (sk->sk_socket && !ssk->sk_socket)
		mptcp_sock_graft(ssk, sk->sk_socket);

	mptcp_propagate_sndbuf((struct sock *)msk, ssk);
	mptcp_sockopt_sync_locked(msk, ssk);
	return true;
}

static void __mptcp_flush_join_list(struct sock *sk)
{
	struct mptcp_subflow_context *tmp, *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		list_move_tail(&subflow->node, &msk->conn_list);
		if (!__mptcp_finish_join(msk, ssk))
			mptcp_subflow_reset(ssk);
		unlock_sock_fast(ssk, slow);
	}
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

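	/* prevent rescheduling on close */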
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		return;

	tout = mptcp_sk(sk)->timer_ival;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) != TCP_CLOSE &&
	    schedule_work(&mptcp_sk(sk)->work)) {
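		/* each subflow already holds a reference to the sk, and the
		 * workqueue is invoked by a subflow, so sk can't go away here
		 */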
		sock_hold(sk);
		return true;
	}
	return false;
}

void mptcp_subflow_eof(struct sock *sk)
{
	if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;
	if (receivers)
		return;

	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
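		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */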
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic();
		sk->sk_data_ready(sk);
	}

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		inet_sk_state_store(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		inet_sk_state_store(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		inet_sk_state_store(sk, TCP_CLOSE);
		break;
	default:
		return;
	}
	mptcp_close_wake_up(sk);
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->data_avail))
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

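	/* can collapse only if MPTCP level sequence is in order and this
	 * mapping has not been xmitted yet
	 */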
	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
	       !mpext->frozen;
}

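/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */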
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		pfrag->size - pfrag->offset > 0 &&
		pfrag->offset == (df->offset + df->data_len) &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static void __mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	u64 snd_una;

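	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */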
	if (__mptcp_check_fallback(msk))
		msk->snd_una = READ_ONCE(msk->snd_nxt);

	snd_una = msk->snd_una;
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		if (unlikely(dfrag == msk->first_pending)) {
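			/* in recovery mode can see ack after the current snd head */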
			if (WARN_ON_ONCE(!msk->recovery))
				break;

			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
		}

		dfrag_clear(sk, dfrag);
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

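		/* prevent wrap around in recovery mode */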
		if (unlikely(delta > dfrag->already_sent)) {
			if (WARN_ON_ONCE(!msk->recovery))
				goto out;
			if (WARN_ON_ONCE(delta > dfrag->data_len))
				goto out;
			dfrag->already_sent += delta - dfrag->already_sent;
		}

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;
		dfrag->already_sent -= delta;

		dfrag_uncharge(sk, delta);
	}

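	/* all retransmitted data acked, recovery completed */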
	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
		msk->recovery = false;

out:
	if (snd_una == READ_ONCE(msk->snd_nxt) &&
	    snd_una == READ_ONCE(msk->write_seq)) {
		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
			mptcp_stop_timer(sk);
	} else {
		mptcp_reset_timer(sk);
	}
}

static void __mptcp_clean_una_wakeup(struct sock *sk)
{
	lockdep_assert_held_once(&sk->sk_lock.slock);

	__mptcp_clean_una(sk);
	mptcp_write_space(sk);
}

static void mptcp_clean_una_wakeup(struct sock *sk)
{
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	mptcp_data_unlock(sk);
}

static void mptcp_enter_memory_pressure(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool first = true;

	sk_stream_moderate_sndbuf(sk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (first)
			tcp_enter_memory_pressure(ssk);
		sk_stream_moderate_sndbuf(ssk);
		first = false;
	}
}

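/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */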
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	mptcp_enter_memory_pressure(sk);
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->already_sent = 0;
	dfrag->page = pfrag->page;

	return dfrag;
}

struct mptcp_sendmsg_info {
	int mss_now;
	int size_goal;
	u16 limit;
	u16 sent;
	unsigned int flags;
	bool data_lock_held;
};

static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
				    u64 data_seq, int avail_size)
{
	u64 window_end = mptcp_wnd_end(msk);
	u64 mptcp_snd_wnd;

	if (__mptcp_check_fallback(msk))
		return avail_size;

	mptcp_snd_wnd = window_end - data_seq;
	avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size);

	if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
		tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
	}

	return avail_size;
}

static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{
	struct skb_ext *mpext = __skb_ext_alloc(gfp);

	if (!mpext)
		return false;
	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
	return true;
}

static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		if (likely(__mptcp_add_ext(skb, gfp))) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		mptcp_enter_memory_pressure(sk);
	}
	return NULL;
}

static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
	if (!skb)
		return NULL;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		tcp_skb_entail(ssk, skb);
		return skb;
	}
	tcp_skb_tsorted_anchor_cleanup(skb);
	kfree_skb(skb);
	return NULL;
}

static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
{
	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;

	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
}

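/* note: this always recomputes the csum on the whole skb, even
 * if we just appended a single frag
 */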
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

static void mptcp_update_infinite_map(struct mptcp_sock *msk,
				      struct sock *ssk,
				      struct mptcp_ext *mpext)
{
	if (!mpext)
		return;

	mpext->infinite_map = 1;
	mpext->data_len = 0;

	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
	pr_fallback(msk);
	mptcp_do_fallback(ssk);
}

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct mptcp_data_frag *dfrag,
			      struct mptcp_sendmsg_info *info)
{
	u64 data_seq = dfrag->data_seq + info->sent;
	int offset = dfrag->offset + info->sent;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool zero_window_probe = false;
	struct mptcp_ext *mpext = NULL;
	bool can_coalesce = false;
	bool reuse_skb = true;
	struct sk_buff *skb;
	size_t copy;
	int i;

	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;

	if (unlikely(!__tcp_can_send(ssk)))
		return -EAGAIN;

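	/* compute send limit */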
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
	copy = info->size_goal;

	skb = tcp_write_queue_tail(ssk);
	if (skb && copy > skb->len) {
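		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */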
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
		if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
			TCP_SKB_CB(skb)->eor = 1;
			goto alloc_skb;
		}

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset);
		if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) {
			tcp_mark_push(tcp_sk(ssk), skb);
			goto alloc_skb;
		}

		copy -= skb->len;
	} else {
alloc_skb:
		skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
		if (!skb)
			return -ENOMEM;

		i = skb_shinfo(skb)->nr_frags;
		reuse_skb = false;
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
	}

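	/* Zero window and all data acked? Probe. */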
	copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
	if (copy == 0) {
		u64 snd_una = READ_ONCE(msk->snd_una);

		if (snd_una != msk->snd_nxt) {
			tcp_remove_empty_skb(ssk);
			return 0;
		}

		zero_window_probe = true;
		data_seq = snd_una - 1;
		copy = 1;

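		/* all mptcp-level data is acked, no skbs should be present
		 * into the ssk write queue
		 */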
		WARN_ON_ONCE(reuse_skb);
	}

	copy = min_t(size_t, copy, info->limit - info->sent);
	if (!sk_wmem_schedule(ssk, copy)) {
		tcp_remove_empty_skb(ssk);
		return -ENOMEM;
	}

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(dfrag->page);
		skb_fill_page_desc(skb, i, dfrag->page, offset, copy);
	}

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(ssk, copy);
	sk_mem_charge(ssk, copy);
	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

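	/* on skb reuse we just need to update the DSS len */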
	if (reuse_skb) {
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
		mpext->data_len += copy;
		WARN_ON_ONCE(zero_window_probe);
		goto out;
	}

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = data_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = copy;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

	if (zero_window_probe) {
		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
		mpext->frozen = 1;
		if (READ_ONCE(msk->csum_enabled))
			mptcp_update_data_checksum(skb, copy);
		tcp_push_pending_frames(ssk);
		return 0;
	}
out:
	if (READ_ONCE(msk->csum_enabled))
		mptcp_update_data_checksum(skb, copy);
	if (mptcp_subflow_ctx(ssk)->send_infinite_map)
		mptcp_update_infinite_map(msk, ssk, mpext);
	trace_mptcp_sendmsg_frag(mpext);
	mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
	return copy;
}

#define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
					 sizeof(struct tcphdr) - \
					 MAX_TCP_OPTION_SPACE - \
					 sizeof(struct ipv6hdr) - \
					 sizeof(struct frag_hdr))

struct subflow_send_info {
	struct sock *ssk;
	u64 linger_time;
};

void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
{
	if (!subflow->stale)
		return;

	subflow->stale = 0;
	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}

#define SSK_MODE_ACTIVE	0
#define SSK_MODE_BACKUP	1
#define SSK_MODE_MAX	2

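/* implement the mptcp packet scheduler;
 * returns the subflow that will transmit the next DSS
 * additionally updates the rtx timeout
 */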
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct subflow_send_info send_info[SSK_MODE_MAX];
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 pace, burst, wmem;
	int i, nr_active = 0;
	struct sock *ssk;
	u64 linger_time;
	long tout = 0;

	sock_owned_by_me(sk);

	if (__mptcp_check_fallback(msk)) {
		if (!msk->first)
			return NULL;
		return __tcp_can_send(msk->first) &&
		       sk_stream_memory_free(msk->first) ? msk->first : NULL;
	}

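	/* re-use last subflow, if the burst allow that */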
	if (msk->last_snd && msk->snd_burst > 0 &&
	    sk_stream_memory_free(msk->last_snd) &&
	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
		mptcp_set_timeout(sk);
		return msk->last_snd;
	}

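	/* pick the subflow with the lower wmem/wspace ratio */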
	for (i = 0; i < SSK_MODE_MAX; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].linger_time = -1;
	}

	mptcp_for_each_subflow(msk, subflow) {
		trace_mptcp_subflow_get_send(subflow);
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		tout = max(tout, mptcp_timeout_from_subflow(subflow));
		nr_active += !subflow->backup;
		pace = subflow->avg_pacing_rate;
		if (unlikely(!pace)) {
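			/* init pacing rate from socket */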
			subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
			pace = subflow->avg_pacing_rate;
			if (!pace)
				continue;
		}

		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
		if (linger_time < send_info[subflow->backup].linger_time) {
			send_info[subflow->backup].ssk = ssk;
			send_info[subflow->backup].linger_time = linger_time;
		}
	}
	__mptcp_set_timeout(sk, tout);

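	/* pick the best backup if no other subflow is active */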
	if (!nr_active)
		send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;

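	/* According to the blest algorithm, to avoid HoL blocking for the
	 * faster flow, we need to:
	 * - estimate the faster flow linger time
	 * - use the above to estimate the amount of byte transferred
	 *   by the faster flow
	 * - check that the amount of queued data is greater than the above,
	 *   otherwise do not use the picked, slower, subflow
	 * We select the subflow with the shorter estimated time to flush
	 * the queued mem, which basically ensure the above. We just need
	 * to check that subflow has a non empty cwin.
	 */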
	ssk = send_info[SSK_MODE_ACTIVE].ssk;
	if (!ssk || !sk_stream_memory_free(ssk))
		return NULL;

	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
	wmem = READ_ONCE(ssk->sk_wmem_queued);
	if (!burst) {
		msk->last_snd = NULL;
		return ssk;
	}

	subflow = mptcp_subflow_ctx(ssk);
	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
					   READ_ONCE(ssk->sk_pacing_rate) * burst,
					   burst + wmem);
	msk->last_snd = ssk;
	msk->snd_burst = burst;
	return ssk;
}

static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
{
	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
	release_sock(ssk);
}

static void mptcp_update_post_push(struct mptcp_sock *msk,
				   struct mptcp_data_frag *dfrag,
				   u32 sent)
{
	u64 snd_nxt_new = dfrag->data_seq;

	dfrag->already_sent += sent;

	msk->snd_burst -= sent;

	snd_nxt_new += dfrag->already_sent;

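	/* snd_nxt_new can be smaller than snd_nxt in case mptcp
	 * is recovering after a failover. In that event, this re-sends
	 * old segments.
	 *
	 * Thus compute snd_nxt_new candidate based on
	 * the dfrag->data_seq that was sent and the data
	 * that has been handed to the subflow for transmission
	 * and skip update in case it was old dfrag.
	 */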
	if (likely(after64(snd_nxt_new, msk->snd_nxt)))
		msk->snd_nxt = snd_nxt_new;
}

void mptcp_check_and_set_pending(struct sock *sk)
{
	if (mptcp_send_head(sk))
		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
}

void __mptcp_push_pending(struct sock *sk, unsigned int flags)
{
	struct sock *prev_ssk = NULL, *ssk = NULL;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {
		.flags = flags,
	};
	struct mptcp_data_frag *dfrag;
	int len, copied = 0;

	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

			prev_ssk = ssk;
			ssk = mptcp_subflow_get_send(msk);

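			/* First check. If the ssk has changed since
			 * the last round, release prev_ssk
			 */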
			if (ssk != prev_ssk && prev_ssk)
				mptcp_push_release(prev_ssk, &info);
			if (!ssk)
				goto out;

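			/* Need to lock the new subflow only if different
			 * from the previous one, otherwise we are still
			 * holding the old lock
			 */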
			if (ssk != prev_ssk)
				lock_sock(ssk);

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0) {
				if (ret == -EAGAIN)
					continue;
				mptcp_push_release(ssk, &info);
				goto out;
			}

			info.sent += ret;
			copied += ret;
			len -= ret;

			mptcp_update_post_push(msk, dfrag, ret);
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

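	/* at this point we held the socket lock for the last subflow we used */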
	if (ssk)
		mptcp_push_release(ssk, &info);

out:
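	/* ensure the rtx timer is running */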
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);
	if (copied)
		__mptcp_check_send_data_fin(sk);
}

static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {
		.data_lock_held = true,
	};
	struct mptcp_data_frag *dfrag;
	struct sock *xmit_ssk;
	int len, copied = 0;
	bool first = true;

	info.flags = 0;
	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

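			/* the caller already invoked the packet scheduler,
			 * check for a different subflow usage only after
			 * spooling the first chunk of data
			 */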
			xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
			if (!xmit_ssk)
				goto out;
			if (xmit_ssk != ssk) {
				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
						       MPTCP_DELEGATE_SEND);
				goto out;
			}

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0)
				goto out;

			info.sent += ret;
			copied += ret;
			len -= ret;
			first = false;

			mptcp_update_post_push(msk, dfrag, ret);
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

out:
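	/* __mptcp_alloc_tx_skb could have released some wmem and we are
	 * not going to flush it via release_sock()
	 */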
	if (copied) {
		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
			 info.size_goal);
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);

		if (msk->snd_data_fin_enable &&
		    msk->snd_nxt + 1 == msk->write_seq)
			mptcp_schedule_work(sk);
	}
}

static void mptcp_set_nospace(struct sock *sk)
{
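	/* enable autotune */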
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

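	/* will be cleared on avail space */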
	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct page_frag *pfrag;
	size_t copied = 0;
	int ret = 0;
	long timeo;

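	/* we don't support FASTOPEN yet */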
	if (msg->msg_flags & MSG_FASTOPEN)
		return -EOPNOTSUPP;

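	/* silently ignore everything else */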
	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	pfrag = sk_page_frag(sk);

	while (msg_data_left(msg)) {
		int total_ts, frag_truesize = 0;
		struct mptcp_data_frag *dfrag;
		bool dfrag_collapsed;
		size_t psize, offset;

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
			ret = -EPIPE;
			goto out;
		}

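		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */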
		dfrag = mptcp_pending_tail(sk);
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			if (!sk_stream_memory_free(sk))
				goto wait_for_memory;

			if (!mptcp_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
			frag_truesize = dfrag->overhead;
		}

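		/* we do not bound vs wspace, to allow a single packet.
		 * memory accounting will prevent excessive memory usage
		 * anyway
		 */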
		offset = dfrag->offset + dfrag->data_len;
		psize = pfrag->size - offset;
		psize = min_t(size_t, psize, msg_data_left(msg));
		total_ts = psize + frag_truesize;

		if (!sk_wmem_schedule(sk, total_ts))
			goto wait_for_memory;

		if (copy_page_from_iter(dfrag->page, offset, psize,
					&msg->msg_iter) != psize) {
			ret = -EFAULT;
			goto out;
		}

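		/* data successfully copied into the write queue */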
		sk->sk_forward_alloc -= total_ts;
		copied += psize;
		dfrag->data_len += psize;
		frag_truesize += psize;
		pfrag->offset += frag_truesize;
		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);

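		/* charge data on mptcp pending queue to the msk socket
		 * Note: we charge such data both to sk and ssk
		 */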
		sk_wmem_queued_add(sk, frag_truesize);
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			if (!msk->first_pending)
				WRITE_ONCE(msk->first_pending, dfrag);
		}
		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
			 !dfrag_collapsed);

		continue;

wait_for_memory:
		mptcp_set_nospace(sk);
		__mptcp_push_pending(sk, msg->msg_flags);
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;
	}

	if (copied)
		__mptcp_push_pending(sk, msg->msg_flags);

out:
	release_sock(sk);
	return copied ? : ret;
}

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len, int flags,
				struct scm_timestamping_internal *tss,
				int *cmsg_flags)
{
	struct sk_buff *skb, *tmp;
	int copied = 0;

	skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_msg(skb, offset, msg, count);
			if (unlikely(err < 0)) {
				if (!copied)
					return err;
				break;
			}
		}

		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, tss);
			*cmsg_flags |= MPTCP_CMSG_TS;
		}

		copied += count;

		if (count < data_len) {
			if (!(flags & MSG_PEEK)) {
				MPTCP_SKB_CB(skb)->offset += count;
				MPTCP_SKB_CB(skb)->map_seq += count;
			}
			break;
		}

		if (!(flags & MSG_PEEK)) {
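			/* we will bulk release the skb memory later */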
			skb->destructor = NULL;
			WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
			__skb_unlink(skb, &msk->receive_queue);
			__kfree_skb(skb);
		}

		if (copied >= len)
			break;
	}

	return copied;
}

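/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */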
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

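			/* Make subflows follow along.  If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */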
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;
				bool slow;

				ssk = mptcp_subflow_tcp_sock(subflow);
				slow = lock_sock_fast(ssk);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
				tcp_cleanup_rbuf(ssk, 1);
				unlock_sock_fast(ssk, slow);
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

static void __mptcp_update_rmem(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->rmem_released)
		return;

	atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
	mptcp_rmem_uncharge(sk, msk->rmem_released);
	WRITE_ONCE(msk->rmem_released, 0);
}

static void __mptcp_splice_receive_queue(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
}

static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool ret, done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);
		bool slowpath;

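		/* we can have data pending in the subflows only if the msk
		 * receive buffer was full at subflow level, meaning that this
		 * is the unlikely slow path
		 */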
		if (likely(!ssk))
			break;

		slowpath = lock_sock_fast(ssk);
		mptcp_data_lock(sk);
		__mptcp_update_rmem(sk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		mptcp_data_unlock(sk);

		if (unlikely(ssk->sk_err))
			__mptcp_error_report(sk);
		unlock_sock_fast(ssk, slowpath);
	} while (!done);

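	/* acquire the data lock only if some input data is pending */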
	ret = moved > 0;
	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
	    !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
		mptcp_data_lock(sk);
		__mptcp_update_rmem(sk);
		ret |= __mptcp_ofo_queue(msk);
		__mptcp_splice_receive_queue(sk);
		mptcp_data_unlock(sk);
	}
	if (ret)
		mptcp_check_data_fin((struct sock *)msk);
	return !skb_queue_empty(&msk->receive_queue);
}

static unsigned int mptcp_inq_hint(const struct sock *sk)
{
	const struct mptcp_sock *msk = mptcp_sk(sk);
	const struct sk_buff *skb;

	skb = skb_peek(&msk->receive_queue);
	if (skb) {
		u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

		if (hint_val >= INT_MAX)
			return INT_MAX;

		return (unsigned int)hint_val;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 1;

	return 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct scm_timestamping_internal tss;
	int copied = 0, cmsg_flags = 0;
	int target;
	long timeo;

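	/* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */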
	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	lock_sock(sk);
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		copied = -ENOTCONN;
		goto out_err;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	if (unlikely(msk->recvmsg_inq))
		cmsg_flags = MPTCP_CMSG_INQ;

	while (copied < len) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

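		/* be sure to advertise window change */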
		mptcp_cleanup_rbuf(msk);

		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
			continue;

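		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */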
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN) {
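				/* race breaker: the shutdown could be after the
				 * previous receive queue check
				 */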
				if (__mptcp_move_skbs(msk))
					continue;
				break;
			}

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		sk_wait_data(sk, &timeo, NULL);
	}

out_err:
	if (cmsg_flags && copied >= 0) {
		if (cmsg_flags & MPTCP_CMSG_TS)
			tcp_recv_timestamp(msg, sk, &tss);

		if (cmsg_flags & MPTCP_CMSG_INQ) {
			unsigned int inq = mptcp_inq_hint(sk);

			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
		}
	}

	pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
		 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
		 skb_queue_empty(&msk->receive_queue), copied);
	if (!(flags & MSG_PEEK))
		mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	struct mptcp_sock *msk = mptcp_sk(sk);

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
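		/* we need a process context to retransmit */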
		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
			mptcp_schedule_work(sk);
	} else {
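		/* delegate our work to tcp_release_cb() */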
		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void mptcp_timeout_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	mptcp_schedule_work(sk);
	sock_put(sk);
}

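/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */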
static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
{
	struct sock *backup = NULL, *pick = NULL;
	struct mptcp_subflow_context *subflow;
	int min_stale_count = INT_MAX;

	sock_owned_by_me((const struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!__mptcp_subflow_active(subflow))
			continue;

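		/* still data outstanding at TCP level? skip this */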
		if (!tcp_rtx_and_write_queues_empty(ssk)) {
			mptcp_pm_subflow_chk_stale(msk, ssk);
			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
			continue;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		if (!pick)
			pick = ssk;
	}

	if (pick)
		return pick;

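	/* use backup only if there are no progresses anywhere */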
	return min_stale_count > 1 ? backup : NULL;
}

static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
{
	if (msk->subflow) {
		iput(SOCK_INODE(msk->subflow));
		msk->subflow = NULL;
	}
}

bool __mptcp_retransmit_pending_data(struct sock *sk)
{
	struct mptcp_data_frag *cur, *rtx_head;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(mptcp_sk(sk)))
		return false;

	if (tcp_rtx_and_write_queues_empty(sk))
		return false;

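	/* the closing socket has some data untransmitted and/or unacked:
	 * some data in the mptcp rtx queue has not really xmitted yet.
	 * keep it simple and re-inject the whole mptcp level rtx queue
	 */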
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	rtx_head = mptcp_rtx_head(sk);
	if (!rtx_head) {
		mptcp_data_unlock(sk);
		return false;
	}

	msk->recovery_snd_nxt = msk->snd_nxt;
	msk->recovery = true;
	mptcp_data_unlock(sk);

	msk->first_pending = rtx_head;
	msk->snd_burst = 0;

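	/* be sure to clear the "sent status" on all re-injected fragments */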
	list_for_each_entry(cur, &msk->rtx_queue, list) {
		if (!cur->already_sent)
			break;
		cur->already_sent = 0;
	}

	return true;
}

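/* flags for __mptcp_close_ssk() */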
#define MPTCP_CF_PUSH		BIT(1)
#define MPTCP_CF_FASTCLOSE	BIT(2)

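/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */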
2274 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2275 struct mptcp_subflow_context *subflow,
2276 unsigned int flags)
2277 {
2278 struct mptcp_sock *msk = mptcp_sk(sk);
2279 bool need_push, dispose_it;
2280
2281 dispose_it = !msk->subflow || ssk != msk->subflow->sk;
2282 if (dispose_it)
2283 list_del(&subflow->node);
2284
2285 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2286
2287 if (flags & MPTCP_CF_FASTCLOSE)
2288 subflow->send_fastclose = 1;
2289
2290 need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
2291 if (!dispose_it) {
2292 tcp_disconnect(ssk, 0);
2293 msk->subflow->state = SS_UNCONNECTED;
2294 mptcp_subflow_ctx_reset(subflow);
2295 release_sock(ssk);
2296
2297 goto out;
2298 }
2299
2300 /* if we are invoked by the msk cleanup code, the subflow is
2301  * already orphaned
2302  */
2303 if (ssk->sk_socket)
2304 sock_orphan(ssk);
2305
2306 subflow->disposable = 1;
2307
2308 /* if ssk hit tcp_done(), tcp_cleanup_ulp() has already released the
2309  * related context; the ssk has been destroyed and we just need to
2310  * release the reference owned by the msk
2311  */
2312 if (!inet_csk(ssk)->icsk_ulp_ops) {
2313 kfree_rcu(subflow, rcu);
2314 } else {
2315 /* otherwise tcp will dispose of the ssk and subflow ctx */
2316 if (ssk->sk_state == TCP_LISTEN) {
2317 tcp_set_state(ssk, TCP_CLOSE);
2318 mptcp_subflow_queue_clean(ssk);
2319 inet_csk_listen_stop(ssk);
2320 }
2321 __tcp_close(ssk, 0);
2322
2323 /* close acquired an extra ref */
2324 __sock_put(ssk);
2325 }
2326 release_sock(ssk);
2327
2328 sock_put(ssk);
2329
2330 if (ssk == msk->first)
2331 msk->first = NULL;
2332
2333 out:
2334 if (ssk == msk->last_snd)
2335 msk->last_snd = NULL;
2336
2337 if (need_push)
2338 __mptcp_push_pending(sk, 0);
2339 }
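/* Note the two paths above: the initial subflow (ssk == msk->subflow->sk)
 * is only tcp_disconnect()ed and kept around for later reuse, while any
 * other subflow is orphaned, torn down via __tcp_close() and has its
 * context freed once the last reference is dropped.
 */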
2340
2341 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2342 struct mptcp_subflow_context *subflow)
2343 {
2344 if (sk->sk_state == TCP_ESTABLISHED)
2345 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2346
2347 /* subflow aborted before reaching the fully_established status:
2348  * attempt the creation of the next subflow
2349  */
2350 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow);
2351
2352 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2353 }
2354
2355 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
2356 {
2357 return 0;
2358 }
2359
2360 static void __mptcp_close_subflow(struct mptcp_sock *msk)
2361 {
2362 struct mptcp_subflow_context *subflow, *tmp;
2363
2364 might_sleep();
2365
2366 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2367 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2368
2369 if (inet_sk_state_load(ssk) != TCP_CLOSE)
2370 continue;
2371
2372 /* 'subflow_data_ready' will re-sched once rx queue is empty */
2373 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2374 continue;
2375
2376 mptcp_close_ssk((struct sock *)msk, ssk, subflow);
2377 }
2378 }
2379
2380 static bool mptcp_check_close_timeout(const struct sock *sk)
2381 {
2382 s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
2383 struct mptcp_subflow_context *subflow;
2384
2385 if (delta >= TCP_TIMEWAIT_LEN)
2386 return true;
2387
2388 /* if all subflows are in closed status, don't bother with the
2389  * additional timeout
2390  */
2391 mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
2392 if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
2393 TCP_CLOSE)
2394 return false;
2395 }
2396 return true;
2397 }
2398
2399 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2400 {
2401 struct mptcp_subflow_context *subflow, *tmp;
2402 struct sock *sk = &msk->sk.icsk_inet.sk;
2403
2404 if (likely(!READ_ONCE(msk->rcv_fastclose)))
2405 return;
2406
2407 mptcp_token_destroy(msk);
2408
2409 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
2410 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2411 bool slow;
2412
2413 slow = lock_sock_fast(tcp_sk);
2414 if (tcp_sk->sk_state != TCP_CLOSE) {
2415 tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
2416 tcp_set_state(tcp_sk, TCP_CLOSE);
2417 }
2418 unlock_sock_fast(tcp_sk, slow);
2419 }
2420
2421 inet_sk_state_store(sk, TCP_CLOSE);
2422 sk->sk_shutdown = SHUTDOWN_MASK;
2423 smp_mb__before_atomic();
2424 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2425
2426 mptcp_close_wake_up(sk);
2427 }
2428
2429 static void __mptcp_retrans(struct sock *sk)
2430 {
2431 struct mptcp_sock *msk = mptcp_sk(sk);
2432 struct mptcp_sendmsg_info info = {};
2433 struct mptcp_data_frag *dfrag;
2434 size_t copied = 0;
2435 struct sock *ssk;
2436 int ret;
2437
2438 mptcp_clean_una_wakeup(sk);
2439
2440 /* first check ssk: need to kick the "stale" logic */
2441 ssk = mptcp_subflow_get_retrans(msk);
2442 dfrag = mptcp_rtx_head(sk);
2443 if (!dfrag) {
2444 if (mptcp_data_fin_enabled(msk)) {
2445 struct inet_connection_sock *icsk = inet_csk(sk);
2446
2447 icsk->icsk_retransmits++;
2448 mptcp_set_datafin_timeout(sk);
2449 mptcp_send_ack(msk);
2450
2451 goto reset_timer;
2452 }
2453
2454 if (!mptcp_send_head(sk))
2455 return;
2456
2457 goto reset_timer;
2458 }
2459
2460 if (!ssk)
2461 goto reset_timer;
2462
2463 lock_sock(ssk);
2464
2465 /* limit retransmission to the bytes already sent on some subflows */
2466 info.sent = 0;
2467 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
2468 while (info.sent < info.limit) {
2469 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2470 if (ret <= 0)
2471 break;
2472
2473 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
2474 copied += ret;
2475 info.sent += ret;
2476 }
2477 if (copied) {
2478 dfrag->already_sent = max(dfrag->already_sent, info.sent);
2479 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2480 info.size_goal);
2481 WRITE_ONCE(msk->allow_infinite_fallback, false);
2482 }
2483
2484 release_sock(ssk);
2485
2486 reset_timer:
2487 mptcp_check_and_set_pending(sk);
2488
2489 if (!mptcp_timer_pending(sk))
2490 mptcp_reset_timer(sk);
2491 }
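/* Note on info.limit above: with DSS checksums enabled a fragment must be
 * re-injected in full (dfrag->data_len), as a partial retransmission would
 * not match the checksum computed over the original mapping; otherwise
 * only the already_sent portion is retransmitted.
 */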
2492
2493 /* schedule the timeout timer for the relevant event: either close timeout
2494  * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
2495  */
2496 void mptcp_reset_timeout(struct mptcp_sock *msk, unsigned long fail_tout)
2497 {
2498 struct sock *sk = (struct sock *)msk;
2499 unsigned long timeout, close_timeout;
2500
2501 if (!fail_tout && !sock_flag(sk, SOCK_DEAD))
2502 return;
2503
2504 close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
2505
2506 /* the close timeout takes precedence on the fail one, and here at least
2507  * one of them is active
2508  */
2509 timeout = sock_flag(sk, SOCK_DEAD) ? close_timeout : fail_tout;
2510
2511 sk_reset_timer(sk, &sk->sk_timer, timeout);
2512 }
2513
2514 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2515 {
2516 struct sock *ssk = msk->first;
2517 bool slow;
2518
2519 if (!ssk)
2520 return;
2521
2522 pr_debug("MP_FAIL doesn't respond, reset the subflow");
2523
2524 slow = lock_sock_fast(ssk);
2525 mptcp_subflow_reset(ssk);
2526 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2527 unlock_sock_fast(ssk, slow);
2528
2529 mptcp_reset_timeout(msk, 0);
2530 }
2531
2532 static void mptcp_worker(struct work_struct *work)
2533 {
2534 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2535 struct sock *sk = &msk->sk.icsk_inet.sk;
2536 unsigned long fail_tout;
2537 int state;
2538
2539 lock_sock(sk);
2540 state = sk->sk_state;
2541 if (unlikely(state == TCP_CLOSE))
2542 goto unlock;
2543
2544 mptcp_check_data_fin_ack(sk);
2545
2546 mptcp_check_fastclose(msk);
2547
2548 mptcp_pm_nl_work(msk);
2549
2550 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
2551 mptcp_check_for_eof(msk);
2552
2553 __mptcp_check_send_data_fin(sk);
2554 mptcp_check_data_fin(sk);
2555
2556 /* There is no point in keeping around an orphaned sk timedout or
2557  * closed, but we need the msk around to reply to incoming DATA_FIN,
2558  * even if it is orphaned and in FIN_WAIT2 state
2559  */
2560 if (sock_flag(sk, SOCK_DEAD) &&
2561 (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
2562 inet_sk_state_store(sk, TCP_CLOSE);
2563 __mptcp_destroy_sock(sk);
2564 goto unlock;
2565 }
2566
2567 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2568 __mptcp_close_subflow(msk);
2569
2570 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2571 __mptcp_retrans(sk);
2572
2573 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
2574 if (fail_tout && time_after(jiffies, fail_tout))
2575 mptcp_mp_fail_no_response(msk);
2576
2577 unlock:
2578 release_sock(sk);
2579 sock_put(sk);
2580 }
2581
2582 static int __mptcp_init_sock(struct sock *sk)
2583 {
2584 struct mptcp_sock *msk = mptcp_sk(sk);
2585
2586 INIT_LIST_HEAD(&msk->conn_list);
2587 INIT_LIST_HEAD(&msk->join_list);
2588 INIT_LIST_HEAD(&msk->rtx_queue);
2589 INIT_WORK(&msk->work, mptcp_worker);
2590 __skb_queue_head_init(&msk->receive_queue);
2591 msk->out_of_order_queue = RB_ROOT;
2592 msk->first_pending = NULL;
2593 msk->rmem_fwd_alloc = 0;
2594 WRITE_ONCE(msk->rmem_released, 0);
2595 msk->timer_ival = TCP_RTO_MIN;
2596
2597 msk->first = NULL;
2598 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
2599 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2600 WRITE_ONCE(msk->allow_infinite_fallback, true);
2601 msk->recovery = false;
2602
2603 mptcp_pm_data_init(msk);
2604
2605 /* re-use the csk retrans timer for MPTCP-level retransmissions */
2606 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
2607 timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
2608
2609 return 0;
2610 }
2611
2612 static void mptcp_ca_reset(struct sock *sk)
2613 {
2614 struct inet_connection_sock *icsk = inet_csk(sk);
2615
2616 tcp_assign_congestion_control(sk);
2617 strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
2618
2619 /* no need to keep a reference to the ops, the name will suffice */
2620 tcp_cleanup_congestion_control(sk);
2621 icsk->icsk_ca_ops = NULL;
2622 }
2623
2624 static int mptcp_init_sock(struct sock *sk)
2625 {
2626 struct net *net = sock_net(sk);
2627 int ret;
2628
2629 ret = __mptcp_init_sock(sk);
2630 if (ret)
2631 return ret;
2632
2633 if (!mptcp_is_enabled(net))
2634 return -ENOPROTOOPT;
2635
2636 if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
2637 return -ENOMEM;
2638
2639 ret = __mptcp_socket_create(mptcp_sk(sk));
2640 if (ret)
2641 return ret;
2642
2643 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone
2644  * will propagate the correct value
2645  */
2646 mptcp_ca_reset(sk);
2647
2648 sk_sockets_allocated_inc(sk);
2649 sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
2650 sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
2651
2652 return 0;
2653 }
2654
2655 static void __mptcp_clear_xmit(struct sock *sk)
2656 {
2657 struct mptcp_sock *msk = mptcp_sk(sk);
2658 struct mptcp_data_frag *dtmp, *dfrag;
2659
2660 WRITE_ONCE(msk->first_pending, NULL);
2661 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
2662 dfrag_clear(sk, dfrag);
2663 }
2664
2665 void mptcp_cancel_work(struct sock *sk)
2666 {
2667 struct mptcp_sock *msk = mptcp_sk(sk);
2668
2669 if (cancel_work_sync(&msk->work))
2670 __sock_put(sk);
2671 }
2672
2673 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
2674 {
2675 lock_sock(ssk);
2676
2677 switch (ssk->sk_state) {
2678 case TCP_LISTEN:
2679 if (!(how & RCV_SHUTDOWN))
2680 break;
2681 fallthrough;
2682 case TCP_SYN_SENT:
2683 tcp_disconnect(ssk, O_NONBLOCK);
2684 break;
2685 default:
2686 if (__mptcp_check_fallback(mptcp_sk(sk))) {
2687 pr_debug("Fallback");
2688 ssk->sk_shutdown |= how;
2689 tcp_shutdown(ssk, how);
2690 } else {
2691 pr_debug("Sending DATA_FIN on subflow %p", ssk);
2692 tcp_send_ack(ssk);
2693 if (!mptcp_timer_pending(sk))
2694 mptcp_reset_timer(sk);
2695 }
2696 break;
2697 }
2698
2699 release_sock(ssk);
2700 }
2701
2702 static const unsigned char new_state[16] = {
2703 /* current state:     new state:      action: */
2704 [0 /* (Invalid) */] = TCP_CLOSE,
2705 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2706 [TCP_SYN_SENT] = TCP_CLOSE,
2707 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2708 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
2709 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
2710 [TCP_TIME_WAIT] = TCP_CLOSE,
2711 [TCP_CLOSE] = TCP_CLOSE,
2712 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
2713 [TCP_LAST_ACK] = TCP_LAST_ACK,
2714 [TCP_LISTEN] = TCP_CLOSE,
2715 [TCP_CLOSING] = TCP_CLOSING,
2716 [TCP_NEW_SYN_RECV] = TCP_CLOSE,
2717 };
2718
2719 static int mptcp_close_state(struct sock *sk)
2720 {
2721 int next = (int)new_state[sk->sk_state];
2722 int ns = next & TCP_STATE_MASK;
2723
2724 inet_sk_state_store(sk, ns);
2725
2726 return next & TCP_ACTION_FIN;
2727 }
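/* Worked example (illustrative): closing an established socket looks up
 * new_state[TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, so the msk
 * moves to TCP_FIN_WAIT1 and the non-zero TCP_ACTION_FIN bit tells the
 * caller to send a DATA_FIN; from TCP_CLOSE_WAIT the table yields
 * TCP_LAST_ACK, again with the FIN action set.
 */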
2728
2729 static void __mptcp_check_send_data_fin(struct sock *sk)
2730 {
2731 struct mptcp_subflow_context *subflow;
2732 struct mptcp_sock *msk = mptcp_sk(sk);
2733
2734 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
2735 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
2736 msk->snd_nxt, msk->write_seq);
2737
2738 /* we still need to enqueue subflows or we are not really shutting
2739  * down: skip this
2740  */
2741 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
2742 mptcp_send_head(sk))
2743 return;
2744
2745 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
2746
2747 /* fallback socket will not get data_fin/ack, can move to the next
2748  * state now
2749  */
2750 if (__mptcp_check_fallback(msk)) {
2751 WRITE_ONCE(msk->snd_una, msk->write_seq);
2752 if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
2753 inet_sk_state_store(sk, TCP_CLOSE);
2754 mptcp_close_wake_up(sk);
2755 } else if (sk->sk_state == TCP_FIN_WAIT1) {
2756 inet_sk_state_store(sk, TCP_FIN_WAIT2);
2757 }
2758 }
2759
2760 mptcp_for_each_subflow(msk, subflow) {
2761 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2762
2763 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
2764 }
2765 }
2766
2767 static void __mptcp_wr_shutdown(struct sock *sk)
2768 {
2769 struct mptcp_sock *msk = mptcp_sk(sk);
2770
2771 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
2772 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
2773 !!mptcp_send_head(sk));
2774
2775 /* will be ignored by fallback sockets */
2776 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2777 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2778
2779 __mptcp_check_send_data_fin(sk);
2780 }
2781
2782 static void __mptcp_destroy_sock(struct sock *sk)
2783 {
2784 struct mptcp_sock *msk = mptcp_sk(sk);
2785
2786 pr_debug("msk=%p", msk);
2787
2788 might_sleep();
2789
2790 mptcp_stop_timer(sk);
2791 sk_stop_timer(sk, &sk->sk_timer);
2792 msk->pm.status = 0;
2793
2794 sk->sk_prot->destroy(sk);
2795
2796 WARN_ON_ONCE(msk->rmem_fwd_alloc);
2797 WARN_ON_ONCE(msk->rmem_released);
2798 sk_stream_kill_queues(sk);
2799 xfrm_sk_free_policy(sk);
2800
2801 sk_refcnt_debug_release(sk);
2802 sock_put(sk);
2803 }
2804
2805 bool __mptcp_close(struct sock *sk, long timeout)
2806 {
2807 struct mptcp_subflow_context *subflow;
2808 struct mptcp_sock *msk = mptcp_sk(sk);
2809 bool do_cancel_work = false;
2810
2811 sk->sk_shutdown = SHUTDOWN_MASK;
2812
2813 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
2814 inet_sk_state_store(sk, TCP_CLOSE);
2815 goto cleanup;
2816 }
2817
2818 if (mptcp_close_state(sk))
2819 __mptcp_wr_shutdown(sk);
2820
2821 sk_stream_wait_close(sk, timeout);
2822
2823 cleanup:
2824 /* orphan all the subflows */
2825 inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
2826 mptcp_for_each_subflow(msk, subflow) {
2827 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2828 bool slow = lock_sock_fast_nested(ssk);
2829
2830 /* since the close timeout takes precedence on the fail one,
2831  * cancel the latter
2832  */
2833 if (ssk == msk->first)
2834 subflow->fail_tout = 0;
2835
2836 sock_orphan(ssk);
2837 unlock_sock_fast(ssk, slow);
2838 }
2839 sock_orphan(sk);
2840
2841 sock_hold(sk);
2842 pr_debug("msk=%p state=%d", sk, sk->sk_state);
2843 if (mptcp_sk(sk)->token)
2844 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
2845
2846 if (sk->sk_state == TCP_CLOSE) {
2847 __mptcp_destroy_sock(sk);
2848 do_cancel_work = true;
2849 } else {
2850 mptcp_reset_timeout(msk, 0);
2851 }
2852
2853 return do_cancel_work;
2854 }
2855
2856 static void mptcp_close(struct sock *sk, long timeout)
2857 {
2858 bool do_cancel_work;
2859
2860 lock_sock(sk);
2861
2862 do_cancel_work = __mptcp_close(sk, timeout);
2863 release_sock(sk);
2864 if (do_cancel_work)
2865 mptcp_cancel_work(sk);
2866
2867 sock_put(sk);
2868 }
2869
2870 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
2871 {
2872 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2873 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
2874 struct ipv6_pinfo *msk6 = inet6_sk(msk);
2875
2876 msk->sk_v6_daddr = ssk->sk_v6_daddr;
2877 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
2878
2879 if (msk6 && ssk6) {
2880 msk6->saddr = ssk6->saddr;
2881 msk6->flow_label = ssk6->flow_label;
2882 }
2883 #endif
2884
2885 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
2886 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
2887 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
2888 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
2889 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
2890 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
2891 }
2892
2893 static int mptcp_disconnect(struct sock *sk, int flags)
2894 {
2895 struct mptcp_sock *msk = mptcp_sk(sk);
2896
2897 inet_sk_state_store(sk, TCP_CLOSE);
2898
2899 mptcp_stop_timer(sk);
2900 sk_stop_timer(sk, &sk->sk_timer);
2901
2902 if (mptcp_sk(sk)->token)
2903 mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
2904
2905 /* msk->subflow is still intact, the following will not free the
2906  * first subflow
2907  */
2908 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
2909 msk->last_snd = NULL;
2910 WRITE_ONCE(msk->flags, 0);
2911 msk->cb_flags = 0;
2912 msk->push_pending = 0;
2913 msk->recovery = false;
2914 msk->can_ack = false;
2915 msk->fully_established = false;
2916 msk->rcv_data_fin = false;
2917 msk->snd_data_fin_enable = false;
2918 msk->rcv_fastclose = false;
2919 msk->use_64bit_ack = false;
2920 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2921 mptcp_pm_data_reset(msk);
2922 mptcp_ca_reset(sk);
2923
2924 sk->sk_shutdown = 0;
2925 sk_error_report(sk);
2926 return 0;
2927 }
2928
2929 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2930 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
2931 {
2932 unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
2933
2934 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
2935 }
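/* This mirrors the tcp_inet6_sk() trick: the ipv6_pinfo area sits at the
 * tail of the containing mptcp6_sock, so its address can be derived from
 * the sock pointer plus a compile-time constant offset, avoiding a
 * dereference of inet_sk(sk)->pinet6.
 */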
2936 #endif
2937
2938 struct sock *mptcp_sk_clone(const struct sock *sk,
2939 const struct mptcp_options_received *mp_opt,
2940 struct request_sock *req)
2941 {
2942 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
2943 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
2944 struct mptcp_sock *msk;
2945 u64 ack_seq;
2946
2947 if (!nsk)
2948 return NULL;
2949
2950 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
2951 if (nsk->sk_family == AF_INET6)
2952 inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
2953 #endif
2954
2955 __mptcp_init_sock(nsk);
2956
2957 msk = mptcp_sk(nsk);
2958 msk->local_key = subflow_req->local_key;
2959 msk->token = subflow_req->token;
2960 msk->subflow = NULL;
2961 WRITE_ONCE(msk->fully_established, false);
2962 if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
2963 WRITE_ONCE(msk->csum_enabled, true);
2964
2965 msk->write_seq = subflow_req->idsn + 1;
2966 msk->snd_nxt = msk->write_seq;
2967 msk->snd_una = msk->write_seq;
2968 msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
2969 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
2970
2971 if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
2972 msk->can_ack = true;
2973 msk->remote_key = mp_opt->sndr_key;
2974 mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
2975 ack_seq++;
2976 WRITE_ONCE(msk->ack_seq, ack_seq);
2977 atomic64_set(&msk->rcv_wnd_sent, ack_seq);
2978 }
2979
2980 sock_reset_flag(nsk, SOCK_RCU_FREE);
2981
2982 inet_sk_state_store(nsk, TCP_SYN_RECV);
2983
2984 security_inet_csk_clone(nsk, req);
2985 bh_unlock_sock(nsk);
2986
2987 /* keep a single reference */
2988 __sock_put(nsk);
2989 return nsk;
2990 }
2991
2992 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2993 {
2994 const struct tcp_sock *tp = tcp_sk(ssk);
2995
2996 msk->rcvq_space.copied = 0;
2997 msk->rcvq_space.rtt_us = 0;
2998
2999 msk->rcvq_space.time = tp->tcp_mstamp;
3000
3001 /* initial rcv_space offering made to peer */
3002 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
3003 TCP_INIT_CWND * tp->advmss);
3004 if (msk->rcvq_space.space == 0)
3005 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
3006
3007 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
3008 }
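/* Worked example (illustrative): with advmss = 1460 and a large enough
 * initial rcv_wnd, the initial space is TCP_INIT_CWND * 1460 = 14600
 * bytes; if the subflow reports zero, the fallback is TCP_INIT_CWND *
 * TCP_MSS_DEFAULT = 10 * 536 = 5360 bytes.
 */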
3009
3010 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
3011 bool kern)
3012 {
3013 struct mptcp_sock *msk = mptcp_sk(sk);
3014 struct socket *listener;
3015 struct sock *newsk;
3016
3017 listener = __mptcp_nmpc_socket(msk);
3018 if (WARN_ON_ONCE(!listener)) {
3019 *err = -EINVAL;
3020 return NULL;
3021 }
3022
3023 pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
3024 newsk = inet_csk_accept(listener->sk, flags, err, kern);
3025 if (!newsk)
3026 return NULL;
3027
3028 pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
3029 if (sk_is_mptcp(newsk)) {
3030 struct mptcp_subflow_context *subflow;
3031 struct sock *new_mptcp_sock;
3032
3033 subflow = mptcp_subflow_ctx(newsk);
3034 new_mptcp_sock = subflow->conn;
3035
3036 /* is_mptcp should be false if subflow->conn is missing, see
3037  * subflow_syn_recv_sock()
3038  */
3039 if (WARN_ON_ONCE(!new_mptcp_sock)) {
3040 tcp_sk(newsk)->is_mptcp = 0;
3041 goto out;
3042 }
3043
3044 /* acquire the 2nd reference for the owning socket */
3045 sock_hold(new_mptcp_sock);
3046 newsk = new_mptcp_sock;
3047 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
3048 } else {
3049 MPTCP_INC_STATS(sock_net(sk),
3050 MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
3051 }
3052
3053 out:
3054 newsk->sk_kern_sock = kern;
3055 return newsk;
3056 }
3057
3058 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
3059 {
3060 struct mptcp_subflow_context *subflow, *tmp;
3061 struct sock *sk = (struct sock *)msk;
3062
3063 __mptcp_clear_xmit(sk);
3064
3065 /* join list will be eventually flushed (with rst) at sock lock release */
3066 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node)
3067 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
3068
3069 /* move to sk_receive_queue, sk_stream_kill_queues will purge it */
3070 mptcp_data_lock(sk);
3071 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
3072 __skb_queue_purge(&sk->sk_receive_queue);
3073 skb_rbtree_purge(&msk->out_of_order_queue);
3074 mptcp_data_unlock(sk);
3075
3076 /* move all the rx fwd alloc into the sk_forward_alloc;
3077  * sk_mem_reclaim_final in inet_sock_destruct() will dispose it
3078  */
3079 sk->sk_forward_alloc += msk->rmem_fwd_alloc;
3080 msk->rmem_fwd_alloc = 0;
3081 mptcp_token_destroy(msk);
3082 mptcp_pm_free_anno_list(msk);
3083 mptcp_free_local_addr_list(msk);
3084 }
3085
3086 static void mptcp_destroy(struct sock *sk)
3087 {
3088 struct mptcp_sock *msk = mptcp_sk(sk);
3089
3090 /* clears msk->subflow, allowing the following to close
3091  * even the initial subflow
3092  */
3093 mptcp_dispose_initial_subflow(msk);
3094 mptcp_destroy_common(msk, 0);
3095 sk_sockets_allocated_dec(sk);
3096 }
3097
3098 void __mptcp_data_acked(struct sock *sk)
3099 {
3100 if (!sock_owned_by_user(sk))
3101 __mptcp_clean_una(sk);
3102 else
3103 __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
3104
3105 if (mptcp_pending_data_fin_ack(sk))
3106 mptcp_schedule_work(sk);
3107 }
3108
3109 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3110 {
3111 if (!mptcp_send_head(sk))
3112 return;
3113
3114 if (!sock_owned_by_user(sk)) {
3115 struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
3116
3117 if (xmit_ssk == ssk)
3118 __mptcp_subflow_push_pending(sk, ssk);
3119 else if (xmit_ssk)
3120 mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
3121 } else {
3122 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3123 }
3124 }
3125
3126 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3127 BIT(MPTCP_RETRANSMIT) | \
3128 BIT(MPTCP_FLUSH_JOIN_LIST))
3129
3130 /* processes deferred events and flushes wmem */
3131 static void mptcp_release_cb(struct sock *sk)
3132 __must_hold(&sk->sk_lock.slock)
3133 {
3134 struct mptcp_sock *msk = mptcp_sk(sk);
3135
3136 for (;;) {
3137 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
3138 msk->push_pending;
3139 if (!flags)
3140 break;
3141
3142 /* the following actions acquire the subflow socket lock, so they
3143  *
3144  * 1) can't be invoked in atomic scope
3145  * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3146  *    datapath acquires the msk socket spinlock while holding
3147  *    the subflow socket lock
3148  */
3149 msk->push_pending = 0;
3150 msk->cb_flags &= ~flags;
3151 spin_unlock_bh(&sk->sk_lock.slock);
3152 if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
3153 __mptcp_flush_join_list(sk);
3154 if (flags & BIT(MPTCP_PUSH_PENDING))
3155 __mptcp_push_pending(sk, 0);
3156 if (flags & BIT(MPTCP_RETRANSMIT))
3157 __mptcp_retrans(sk);
3158
3159 cond_resched();
3160 spin_lock_bh(&sk->sk_lock.slock);
3161 }
3162
3163 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3164 __mptcp_clean_una_wakeup(sk);
3165 if (unlikely(msk->cb_flags)) {
3166 /* be sure to set the current sk state before taking actions
3167  * depending on sk_state, that is processing MPTCP_ERROR_REPORT
3168  */
3169 if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
3170 __mptcp_set_connected(sk);
3171 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3172 __mptcp_error_report(sk);
3173 if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
3174 msk->last_snd = NULL;
3175 }
3176
3177 __mptcp_update_rmem(sk);
3178 }
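/* The loop above is a lock hand-off pattern: sk_lock.slock is dropped
 * before running actions that acquire subflow-level locks, then
 * re-acquired, and cb_flags is re-evaluated so that events set while the
 * spinlock was released are not lost.
 */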
3179
3180 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3181  * TCP can't schedule delack timer before the subflow is fully established.
3182  * MPTCP uses the delack timer to do 3rd ack retransmissions
3183  */
3184 static void schedule_3rdack_retransmission(struct sock *ssk)
3185 {
3186 struct inet_connection_sock *icsk = inet_csk(ssk);
3187 struct tcp_sock *tp = tcp_sk(ssk);
3188 unsigned long timeout;
3189
3190 if (mptcp_subflow_ctx(ssk)->fully_established)
3191 return;
3192
3193 /* reschedule with a timeout above RTT, as we must look only for drop */
3194 if (tp->srtt_us)
3195 timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
3196 else
3197 timeout = TCP_TIMEOUT_INIT;
3198 timeout += jiffies;
3199
3200 WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
3201 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3202 icsk->icsk_ack.timeout = timeout;
3203 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3204 }
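/* Worked arithmetic (illustrative): tp->srtt_us stores the smoothed RTT
 * in usec, left-shifted by 3; srtt_us >> (3 - 1) is therefore about twice
 * the RTT. E.g. a 50ms RTT gives srtt_us = 400000 and a ~100ms timeout,
 * safely above one round trip.
 */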
3205
3206 void mptcp_subflow_process_delegated(struct sock *ssk)
3207 {
3208 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3209 struct sock *sk = subflow->conn;
3210
3211 if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
3212 mptcp_data_lock(sk);
3213 if (!sock_owned_by_user(sk))
3214 __mptcp_subflow_push_pending(sk, ssk);
3215 else
3216 __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
3217 mptcp_data_unlock(sk);
3218 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
3219 }
3220 if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
3221 schedule_3rdack_retransmission(ssk);
3222 mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
3223 }
3224 }
3225
3226 static int mptcp_hash(struct sock *sk)
3227 {
3228 /* should never be called:
3229  * we hash the TCP subflows, not the master socket
3230  */
3231 WARN_ON_ONCE(1);
3232 return 0;
3233 }
3234
3235 static void mptcp_unhash(struct sock *sk)
3236 {
3237 /* called from sk_common_release(), there is nothing to do here */
3238 }
3239
3240 static int mptcp_get_port(struct sock *sk, unsigned short snum)
3241 {
3242 struct mptcp_sock *msk = mptcp_sk(sk);
3243 struct socket *ssock;
3244
3245 ssock = __mptcp_nmpc_socket(msk);
3246 pr_debug("msk=%p, subflow=%p", msk, ssock);
3247 if (WARN_ON_ONCE(!ssock))
3248 return -EINVAL;
3249
3250 return inet_csk_get_port(ssock->sk, snum);
3251 }
3252
3253 void mptcp_finish_connect(struct sock *ssk)
3254 {
3255 struct mptcp_subflow_context *subflow;
3256 struct mptcp_sock *msk;
3257 struct sock *sk;
3258 u64 ack_seq;
3259
3260 subflow = mptcp_subflow_ctx(ssk);
3261 sk = subflow->conn;
3262 msk = mptcp_sk(sk);
3263
3264 pr_debug("msk=%p, token=%u", sk, subflow->token);
3265
3266 mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
3267 ack_seq++;
3268 subflow->map_seq = ack_seq;
3269 subflow->map_subflow_seq = 1;
3270
3271 /* the socket is not connected yet, no msk/subflow ops can access/race
3272  * accessing the fields below
3273  */
3274 WRITE_ONCE(msk->remote_key, subflow->remote_key);
3275 WRITE_ONCE(msk->local_key, subflow->local_key);
3276 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
3277 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
3278 WRITE_ONCE(msk->ack_seq, ack_seq);
3279 WRITE_ONCE(msk->can_ack, 1);
3280 WRITE_ONCE(msk->snd_una, msk->write_seq);
3281 atomic64_set(&msk->rcv_wnd_sent, ack_seq);
3282
3283 mptcp_pm_new_connection(msk, ssk, 0);
3284
3285 mptcp_rcv_space_init(msk, ssk);
3286 }
3287
3288 void mptcp_sock_graft(struct sock *sk, struct socket *parent)
3289 {
3290 write_lock_bh(&sk->sk_callback_lock);
3291 rcu_assign_pointer(sk->sk_wq, &parent->wq);
3292 sk_set_socket(sk, parent);
3293 sk->sk_uid = SOCK_INODE(parent)->i_uid;
3294 write_unlock_bh(&sk->sk_callback_lock);
3295 }
3296
3297 bool mptcp_finish_join(struct sock *ssk)
3298 {
3299 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3300 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3301 struct sock *parent = (void *)msk;
3302 bool ret = true;
3303
3304 pr_debug("msk=%p, subflow=%p", msk, subflow);
3305
3306 /* mptcp socket already closing? */
3307 if (!mptcp_is_fully_established(parent)) {
3308 subflow->reset_reason = MPTCP_RST_EMPTCP;
3309 return false;
3310 }
3311
3312 if (!list_empty(&subflow->node))
3313 goto out;
3314
3315 if (!mptcp_pm_allow_new_subflow(msk))
3316 goto err_prohibited;
3317
3318 /* active connections are already on conn_list.
3319  * If we can't acquire the msk socket lock here, let the release
3320  * callback handle it
3321  */
3322 mptcp_data_lock(parent);
3323 if (!sock_owned_by_user(parent)) {
3324 ret = __mptcp_finish_join(msk, ssk);
3325 if (ret) {
3326 sock_hold(ssk);
3327 list_add_tail(&subflow->node, &msk->conn_list);
3328 }
3329 } else {
3330 sock_hold(ssk);
3331 list_add_tail(&subflow->node, &msk->join_list);
3332 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3333 }
3334 mptcp_data_unlock(parent);
3335
3336 if (!ret) {
3337 err_prohibited:
3338 subflow->reset_reason = MPTCP_RST_EPROHIBIT;
3339 return false;
3340 }
3341
3342 subflow->map_seq = READ_ONCE(msk->ack_seq);
3343 WRITE_ONCE(msk->allow_infinite_fallback, false);
3344
3345 out:
3346 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
3347 return true;
3348 }
3349
3350 static void mptcp_shutdown(struct sock *sk, int how)
3351 {
3352 pr_debug("sk=%p, how=%d", sk, how);
3353
3354 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
3355 __mptcp_wr_shutdown(sk);
3356 }
3357
3358 static int mptcp_forward_alloc_get(const struct sock *sk)
3359 {
3360 return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
3361 }
3362
3363 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3364 {
3365 const struct sock *sk = (void *)msk;
3366 u64 delta;
3367
3368 if (sk->sk_state == TCP_LISTEN)
3369 return -EINVAL;
3370
3371 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
3372 return 0;
3373
3374 delta = msk->write_seq - v;
3375 if (__mptcp_check_fallback(msk) && msk->first) {
3376 struct tcp_sock *tp = tcp_sk(msk->first);
3377
3378 /* the first subflow is disconnected after close - see
3379  * __mptcp_close_ssk(); tcp_disconnect() moves the write_seq
3380  * ahead, so ignore that status, too
3381  */
3382 if (!((1 << msk->first->sk_state) &
3383 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
3384 delta += READ_ONCE(tp->write_seq) - tp->snd_una;
3385 }
3386 if (delta > INT_MAX)
3387 delta = INT_MAX;
3388
3389 return (int)delta;
3390 }
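/* Worked example (illustrative): with write_seq = 1000 and snd_una = 400,
 * SIOCOUTQ reports 600 not-yet-acked bytes; SIOCOUTQNSD passes snd_nxt
 * instead, so it counts only bytes not yet handed to any subflow.
 */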
3391
3392 static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3393 {
3394 struct mptcp_sock *msk = mptcp_sk(sk);
3395 bool slow;
3396 int answ;
3397
3398 switch (cmd) {
3399 case SIOCINQ:
3400 if (sk->sk_state == TCP_LISTEN)
3401 return -EINVAL;
3402
3403 lock_sock(sk);
3404 __mptcp_move_skbs(msk);
3405 answ = mptcp_inq_hint(sk);
3406 release_sock(sk);
3407 break;
3408 case SIOCOUTQ:
3409 slow = lock_sock_fast(sk);
3410 answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3411 unlock_sock_fast(sk, slow);
3412 break;
3413 case SIOCOUTQNSD:
3414 slow = lock_sock_fast(sk);
3415 answ = mptcp_ioctl_outq(msk, msk->snd_nxt);
3416 unlock_sock_fast(sk, slow);
3417 break;
3418 default:
3419 return -ENOIOCTLCMD;
3420 }
3421
3422 return put_user(answ, (int __user *)arg);
3423 }
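/* Illustrative userspace usage (not part of this file), assuming a
 * connected MPTCP socket fd:
 *
 *	int unread, unacked;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes readable at the MPTCP level
 *	ioctl(fd, SIOCOUTQ, &unacked);	// bytes sent but not yet DATA_ACKed
 */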
3424
3425 static struct proto mptcp_prot = {
3426 .name = "MPTCP",
3427 .owner = THIS_MODULE,
3428 .init = mptcp_init_sock,
3429 .disconnect = mptcp_disconnect,
3430 .close = mptcp_close,
3431 .accept = mptcp_accept,
3432 .setsockopt = mptcp_setsockopt,
3433 .getsockopt = mptcp_getsockopt,
3434 .shutdown = mptcp_shutdown,
3435 .destroy = mptcp_destroy,
3436 .sendmsg = mptcp_sendmsg,
3437 .ioctl = mptcp_ioctl,
3438 .recvmsg = mptcp_recvmsg,
3439 .release_cb = mptcp_release_cb,
3440 .hash = mptcp_hash,
3441 .unhash = mptcp_unhash,
3442 .get_port = mptcp_get_port,
3443 .forward_alloc_get = mptcp_forward_alloc_get,
3444 .sockets_allocated = &mptcp_sockets_allocated,
3445
3446 .memory_allocated = &tcp_memory_allocated,
3447 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
3448
3449 .memory_pressure = &tcp_memory_pressure,
3450 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3451 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3452 .sysctl_mem = sysctl_tcp_mem,
3453 .obj_size = sizeof(struct mptcp_sock),
3454 .slab_flags = SLAB_TYPESAFE_BY_RCU,
3455 .no_autobind = true,
3456 };
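/* Once this proto is registered (see mptcp_proto_init() below), userspace
 * creates MPTCP sockets through the dedicated protocol number, e.g.:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 * and then uses the ordinary connect()/listen()/accept() calls.
 */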
3457
3458 static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3459 {
3460 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3461 struct socket *ssock;
3462 int err;
3463
3464 lock_sock(sock->sk);
3465 ssock = __mptcp_nmpc_socket(msk);
3466 if (!ssock) {
3467 err = -EINVAL;
3468 goto unlock;
3469 }
3470
3471 err = ssock->ops->bind(ssock, uaddr, addr_len);
3472 if (!err)
3473 mptcp_copy_inaddrs(sock->sk, ssock->sk);
3474
3475 unlock:
3476 release_sock(sock->sk);
3477 return err;
3478 }
3479
3480 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
3481 struct mptcp_subflow_context *subflow)
3482 {
3483 subflow->request_mptcp = 0;
3484 __mptcp_do_fallback(msk);
3485 }
3486
3487 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
3488 int addr_len, int flags)
3489 {
3490 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3491 struct mptcp_subflow_context *subflow;
3492 struct socket *ssock;
3493 int err = -EINVAL;
3494
3495 lock_sock(sock->sk);
3496 if (uaddr) {
3497 if (addr_len < sizeof(uaddr->sa_family))
3498 goto unlock;
3499
3500 if (uaddr->sa_family == AF_UNSPEC) {
3501 err = mptcp_disconnect(sock->sk, flags);
3502 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
3503 goto unlock;
3504 }
3505 }
3506
3507 if (sock->state != SS_UNCONNECTED && msk->subflow) {
3508 /* pending connection or invalid state, let the existing subflow
3509  * cope with that
3510  */
3511 ssock = msk->subflow;
3512 goto do_connect;
3513 }
3514
3515 ssock = __mptcp_nmpc_socket(msk);
3516 if (!ssock)
3517 goto unlock;
3518
3519 mptcp_token_destroy(msk);
3520 inet_sk_state_store(sock->sk, TCP_SYN_SENT);
3521 subflow = mptcp_subflow_ctx(ssock->sk);
3522 #ifdef CONFIG_TCP_MD5SIG
3523 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3524  * TCP option space.
3525  */
3526 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
3527 mptcp_subflow_early_fallback(msk, subflow);
3528 #endif
3529 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
3530 MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
3531 mptcp_subflow_early_fallback(msk, subflow);
3532 }
3533 if (likely(!__mptcp_check_fallback(msk)))
3534 MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);
3535
3536 do_connect:
3537 err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
3538 sock->state = ssock->state;
3539
3540 /* on successful connect, the msk state will be moved to established by
3541  * subflow_finish_connect()
3542  */
3543 if (!err || err == -EINPROGRESS)
3544 mptcp_copy_inaddrs(sock->sk, ssock->sk);
3545 else
3546 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3547
3548 unlock:
3549 release_sock(sock->sk);
3550 return err;
3551 }
3552
3553 static int mptcp_listen(struct socket *sock, int backlog)
3554 {
3555 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3556 struct socket *ssock;
3557 int err;
3558
3559 pr_debug("msk=%p", msk);
3560
3561 lock_sock(sock->sk);
3562 ssock = __mptcp_nmpc_socket(msk);
3563 if (!ssock) {
3564 err = -EINVAL;
3565 goto unlock;
3566 }
3567
3568 mptcp_token_destroy(msk);
3569 inet_sk_state_store(sock->sk, TCP_LISTEN);
3570 sock_set_flag(sock->sk, SOCK_RCU_FREE);
3571
3572 err = ssock->ops->listen(ssock, backlog);
3573 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
3574 if (!err)
3575 mptcp_copy_inaddrs(sock->sk, ssock->sk);
3576
3577 unlock:
3578 release_sock(sock->sk);
3579 return err;
3580 }
3581
3582 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
3583 int flags, bool kern)
3584 {
3585 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3586 struct socket *ssock;
3587 int err;
3588
3589 pr_debug("msk=%p", msk);
3590
3591 ssock = __mptcp_nmpc_socket(msk);
3592 if (!ssock)
3593 return -EINVAL;
3594
3595 err = ssock->ops->accept(sock, newsock, flags, kern);
3596 if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
3597 struct mptcp_sock *msk = mptcp_sk(newsock->sk);
3598 struct mptcp_subflow_context *subflow;
3599 struct sock *newsk = newsock->sk;
3600
3601 lock_sock(newsk);
3602
3603 /* PM/worker can now acquire the first subflow socket
3604  * lock without racing with listener queue cleanup,
3605  * we can notify it, if needed.
3606  *
3607  * Even if remote has reset the initial subflow by now,
3608  * the refcnt is still at least one.
3609  */
3610 subflow = mptcp_subflow_ctx(msk->first);
3611 list_add(&subflow->node, &msk->conn_list);
3612 sock_hold(msk->first);
3613 if (mptcp_is_fully_established(newsk))
3614 mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
3615
3616 mptcp_copy_inaddrs(newsk, msk->first);
3617 mptcp_rcv_space_init(msk, msk->first);
3618 mptcp_propagate_sndbuf(newsk, msk->first);
3619
3620 /* set ssk->sk_socket of accept()ed flows to mptcp socket.
3621  * This is needed so the NOSPACE flag can be set from the tcp stack.
3622  */
3623 mptcp_for_each_subflow(msk, subflow) {
3624 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3625
3626 if (!ssk->sk_socket)
3627 mptcp_sock_graft(ssk, newsock);
3628 }
3629 release_sock(newsk);
3630 }
3631
3632 return err;
3633 }
3634
3635 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
3636 {
3637 /* Concurrent splices from sk_receive_queue into receive_queue will
3638  * always show at least one non-empty queue when checked in this order
3639  */
3640 if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
3641 skb_queue_empty_lockless(&msk->receive_queue))
3642 return 0;
3643
3644 return EPOLLIN | EPOLLRDNORM;
3645 }
3646
3647 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
3648 {
3649 struct sock *sk = (struct sock *)msk;
3650
3651 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
3652 return EPOLLOUT | EPOLLWRNORM;
3653
3654 if (sk_stream_is_writeable(sk))
3655 return EPOLLOUT | EPOLLWRNORM;
3656
3657 mptcp_set_nospace(sk);
3658 smp_mb__after_atomic();
3659 if (sk_stream_is_writeable(sk))
3660 return EPOLLOUT | EPOLLWRNORM;
3661
3662 return 0;
3663 }
3664
3665 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
3666 struct poll_table_struct *wait)
3667 {
3668 struct sock *sk = sock->sk;
3669 struct mptcp_sock *msk;
3670 __poll_t mask = 0;
3671 int state;
3672
3673 msk = mptcp_sk(sk);
3674 sock_poll_wait(file, sock, wait);
3675
3676 state = inet_sk_state_load(sk);
3677 pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
3678 if (state == TCP_LISTEN) {
3679 if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk))
3680 return 0;
3681
3682 return inet_csk_listen_poll(msk->subflow->sk);
3683 }
3684
3685 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
3686 mask |= mptcp_check_readable(msk);
3687 mask |= mptcp_check_writeable(msk);
3688 }
3689 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
3690 mask |= EPOLLHUP;
3691 if (sk->sk_shutdown & RCV_SHUTDOWN)
3692 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
3693
3694 /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
3695 smp_rmb();
3696 if (sk->sk_err)
3697 mask |= EPOLLERR;
3698
3699 return mask;
3700 }
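/* Note: a listening msk delegates its poll state entirely to the first
 * subflow's accept queue, while established sockets re-check writeability
 * after mptcp_set_nospace() to close the race with the wakeup that clears
 * the NOSPACE bit.
 */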
3701
3702 static const struct proto_ops mptcp_stream_ops = {
3703 .family = PF_INET,
3704 .owner = THIS_MODULE,
3705 .release = inet_release,
3706 .bind = mptcp_bind,
3707 .connect = mptcp_stream_connect,
3708 .socketpair = sock_no_socketpair,
3709 .accept = mptcp_stream_accept,
3710 .getname = inet_getname,
3711 .poll = mptcp_poll,
3712 .ioctl = inet_ioctl,
3713 .gettstamp = sock_gettstamp,
3714 .listen = mptcp_listen,
3715 .shutdown = inet_shutdown,
3716 .setsockopt = sock_common_setsockopt,
3717 .getsockopt = sock_common_getsockopt,
3718 .sendmsg = inet_sendmsg,
3719 .recvmsg = inet_recvmsg,
3720 .mmap = sock_no_mmap,
3721 .sendpage = inet_sendpage,
3722 };
3723
3724 static struct inet_protosw mptcp_protosw = {
3725 .type = SOCK_STREAM,
3726 .protocol = IPPROTO_MPTCP,
3727 .prot = &mptcp_prot,
3728 .ops = &mptcp_stream_ops,
3729 .flags = INET_PROTOSW_ICSK,
3730 };
3731
3732 static int mptcp_napi_poll(struct napi_struct *napi, int budget)
3733 {
3734 struct mptcp_delegated_action *delegated;
3735 struct mptcp_subflow_context *subflow;
3736 int work_done = 0;
3737
3738 delegated = container_of(napi, struct mptcp_delegated_action, napi);
3739 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
3740 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3741
3742 bh_lock_sock_nested(ssk);
3743 if (!sock_owned_by_user(ssk) &&
3744 mptcp_subflow_has_delegated_action(subflow))
3745 mptcp_subflow_process_delegated(ssk);
3746 /* ... elsewhere the delegated action has already been processed,
3747  * or will be at the next release_sock().
3748  * In both cases we must dequeue the subflow here - on the same
3749  * CPU that scheduled it.
3750  */
3751 bh_unlock_sock(ssk);
3752 sock_put(ssk);
3753
3754 if (++work_done == budget)
3755 return budget;
3756 }
3757
3758 /* always provide a 0 'work_done' argument, so that napi_complete_done
3759  * will not try accessing the NULL napi->dev ptr
3760  */
3761 napi_complete_done(napi, 0);
3762 return work_done;
3763 }
3764
3765 void __init mptcp_proto_init(void)
3766 {
3767 struct mptcp_delegated_action *delegated;
3768 int cpu;
3769
3770 mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
3771
3772 if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
3773 panic("Failed to allocate MPTCP pcpu counter\n");
3774
3775 init_dummy_netdev(&mptcp_napi_dev);
3776 for_each_possible_cpu(cpu) {
3777 delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
3778 INIT_LIST_HEAD(&delegated->head);
3779 netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi,
3780 mptcp_napi_poll);
3781 napi_enable(&delegated->napi);
3782 }
3783
3784 mptcp_subflow_init();
3785 mptcp_pm_init();
3786 mptcp_token_init();
3787
3788 if (proto_register(&mptcp_prot, 1) != 0)
3789 panic("Failed to register MPTCP proto.\n");
3790
3791 inet_register_protosw(&mptcp_protosw);
3792
3793 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
3794 }
3795
3796 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3797 static const struct proto_ops mptcp_v6_stream_ops = {
3798 .family = PF_INET6,
3799 .owner = THIS_MODULE,
3800 .release = inet6_release,
3801 .bind = mptcp_bind,
3802 .connect = mptcp_stream_connect,
3803 .socketpair = sock_no_socketpair,
3804 .accept = mptcp_stream_accept,
3805 .getname = inet6_getname,
3806 .poll = mptcp_poll,
3807 .ioctl = inet6_ioctl,
3808 .gettstamp = sock_gettstamp,
3809 .listen = mptcp_listen,
3810 .shutdown = inet_shutdown,
3811 .setsockopt = sock_common_setsockopt,
3812 .getsockopt = sock_common_getsockopt,
3813 .sendmsg = inet6_sendmsg,
3814 .recvmsg = inet6_recvmsg,
3815 .mmap = sock_no_mmap,
3816 .sendpage = inet_sendpage,
3817 #ifdef CONFIG_COMPAT
3818 .compat_ioctl = inet6_compat_ioctl,
3819 #endif
3820 };
3821
3822 static struct proto mptcp_v6_prot;
3823
3824 static void mptcp_v6_destroy(struct sock *sk)
3825 {
3826 mptcp_destroy(sk);
3827 inet6_destroy_sock(sk);
3828 }
3829
3830 static struct inet_protosw mptcp_v6_protosw = {
3831 .type = SOCK_STREAM,
3832 .protocol = IPPROTO_MPTCP,
3833 .prot = &mptcp_v6_prot,
3834 .ops = &mptcp_v6_stream_ops,
3835 .flags = INET_PROTOSW_ICSK,
3836 };
3837
3838 int __init mptcp_proto_v6_init(void)
3839 {
3840 int err;
3841
3842 mptcp_v6_prot = mptcp_prot;
3843 strcpy(mptcp_v6_prot.name, "MPTCPv6");
3844 mptcp_v6_prot.slab = NULL;
3845 mptcp_v6_prot.destroy = mptcp_v6_destroy;
3846 mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
3847
3848 err = proto_register(&mptcp_v6_prot, 1);
3849 if (err)
3850 return err;
3851
3852 err = inet6_register_protosw(&mptcp_v6_protosw);
3853 if (err)
3854 proto_unregister(&mptcp_v6_prot);
3855
3856 return err;
3857 }
3858 #endif