0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/dccp.h>
0010 #include <linux/skbuff.h>
0011 #include <linux/slab.h>
0012
0013 #include <net/sock.h>
0014
0015 #include "ackvec.h"
0016 #include "ccid.h"
0017 #include "dccp.h"
0018
0019
/* Minimum spacing (jiffies) between Syncs sent in response to sequence-invalid
 * packets; consumed by dccp_check_seqno() below. */
int sysctl_dccp_sync_ratelimit	__read_mostly = HZ / 8;
0021
/*
 * Strip the DCCP header (dccph_doff is in 32-bit words) and append the
 * remaining payload to the socket's receive queue, waking any reader.
 */
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	/* Charge the skb against sk's receive buffer accounting. */
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk);
}
0029
/*
 * Connection-terminating packet (Close/CloseReq/Reset) received: shut down
 * both directions, mark the socket done, and queue the skb so the reader
 * drains pending data and then sees EOF.
 */
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown = SHUTDOWN_MASK;
	sock_set_flag(sk, SOCK_DONE);
	dccp_enqueue_skb(sk, skb);
}
0042
/*
 * Process an incoming DCCP-Close.
 * Returns 1 if the skb was queued for the receiver (via dccp_fin()),
 * 0 if the caller should free it.
 */
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	switch (sk->sk_state) {
	/*
	 * NOTE(review): in CLOSING only the client answers the peer's Close
	 * with a Reset; a server breaks out and the packet is discarded —
	 * presumably to resolve simultaneous closes, confirm vs. RFC 4340.
	 */
	case DCCP_CLOSING:
		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
			break;
		fallthrough;
	case DCCP_REQUESTING:
	case DCCP_ACTIVE_CLOSEREQ:
		/* Acknowledge the Close with Reset(Closed) and tear down. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_done(sk);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Enter passive close: queue the skb so recv() returns EOF. */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
		fallthrough;
	case DCCP_PASSIVE_CLOSE:
		/* Also reached on a retransmitted Close: re-wake the waiter. */
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
0088
/*
 * Process an incoming DCCP-CloseReq.
 * Returns 1 if the skb was queued for the receiver (via dccp_fin()),
 * 0 if the caller should free it.
 */
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;

	/*
	 * Only a client acts on a CloseReq; any other role replies with a
	 * Sync so the peer can detect the state mismatch.
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return queued;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		/* Comply immediately: send our Close and await the Reset. */
		dccp_send_close(sk, 0);
		dccp_set_state(sk, DCCP_CLOSING);
		break;
	case DCCP_OPEN:
	case DCCP_PARTOPEN:
		/* Queue the skb so the reader drains data and sees EOF. */
		queued = 1;
		dccp_fin(sk, skb);
		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
		fallthrough;
	case DCCP_PASSIVE_CLOSEREQ:
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	}
	return queued;
}
0122
/*
 * Map a wire Reset code to the errno reported through sk->sk_err.
 * Codes outside the known range, and codes with no error mapping
 * (Closed / Unspecified), yield 0.
 */
static u16 dccp_reset_code_convert(const u8 code)
{
	if (code >= DCCP_MAX_RESET_CODES)
		return 0;

	switch (code) {
	case DCCP_RESET_CODE_ABORTED:			return ECONNRESET;

	case DCCP_RESET_CODE_NO_CONNECTION:		return ECONNREFUSED;
	case DCCP_RESET_CODE_CONNECTION_REFUSED:	return ECONNREFUSED;
	case DCCP_RESET_CODE_TOO_BUSY:			return EUSERS;
	case DCCP_RESET_CODE_AGGRESSION_PENALTY:	return EDQUOT;

	case DCCP_RESET_CODE_PACKET_ERROR:		return ENOMSG;
	case DCCP_RESET_CODE_BAD_INIT_COOKIE:		return EBADR;
	case DCCP_RESET_CODE_BAD_SERVICE_CODE:		return EBADRQC;
	case DCCP_RESET_CODE_OPTION_ERROR:		return EILSEQ;
	case DCCP_RESET_CODE_MANDATORY_ERROR:		return EOPNOTSUPP;

	default:					return 0;
	}
}
0144
/*
 * Process an incoming DCCP-Reset: record the mapped errno on the socket,
 * queue the skb as EOF, wake any poller with an error event, and move the
 * socket into time-wait.
 */
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of TCP FIN so that we can exit via EOF. */
	dccp_fin(sk, skb);

	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
0158
0159 static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
0160 {
0161 struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
0162
0163 if (av == NULL)
0164 return;
0165 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
0166 dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
0167 dccp_ackvec_input(av, skb);
0168 }
0169
/*
 * Hand the incoming packet to the RX and TX congestion-control modules,
 * honouring any half-close already in effect.
 */
static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);

	/* Don't deliver to the RX CCID once the receive side is shut down. */
	if (!(sk->sk_shutdown & RCV_SHUTDOWN))
		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);

	/*
	 * The TX CCID keeps receiving feedback even after SEND_SHUTDOWN as
	 * long as the write queue still holds packets to drain.
	 */
	if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}
0184
/*
 * Sequence-validity checks ("Step 6" per the DCCP_WARN below; RFC 4340).
 * Returns 0 if the packet is acceptable (GSR/GAR updated as appropriate),
 * -1 if it must be dropped (possibly after answering with a Sync).
 */
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;

	/*
	 * Sync/SyncAck get a stricter precondition: the ackno must lie in
	 * [AWL, AWH] and the seqno must not precede SWL; only then is GSR
	 * updated.  Anything else is dropped without a reply.
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 * For connection-teardown packets (CloseReq/Close/Reset) the lower
	 * window edges are tightened to seqno > GSR and ackno >= GAR, so a
	 * stale or spoofed teardown cannot kill the connection.
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		/* Track the greatest acknowledged ackno (GAR), Syncs excluded. */
		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
		    after48(ackno, dp->dccps_gar))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;

		/*
		 * Sequence-invalid packet: answer with a Sync carrying our
		 * state, rate-limited via sysctl_dccp_sync_ratelimit so we
		 * cannot be goaded into flooding Syncs.
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return -1;

		DCCP_WARN("Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		/* A seqno-invalid Reset is Synced against GSR instead. */
		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}
0282
/*
 * Per-type packet dispatch for sockets past the handshake.  The skb is
 * always consumed here (queued to the receiver or freed); returns 0 on
 * every path.
 */
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/* Deliver the payload to the application. */
		dccp_enqueue_skb(sk, skb);
		return 0;
	case DCCP_PKT_ACK:
		/* Pure Ack: nothing to deliver, already processed upstream. */
		goto discard;
	case DCCP_PKT_RESET:
		/* Terminates the connection (enters time-wait internally). */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_CLOSE:
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	case DCCP_PKT_REQUEST:
		/*
		 * A Request in an established state is out of place; only a
		 * listener subjects it to the osr check below, everyone else
		 * Syncs immediately.  Note the cross-case goto labels.
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		/* Falls out of the switch: counted as an in-error packet. */
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/* Skip the error counter for Syncs. */
		goto discard;
	}

	DCCP_INC_STATS(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
0363
/*
 * Receive path for an established socket: sequence-validate, parse
 * options, feed the ack vector and CCIDs, then dispatch by packet type.
 * Returns 0 when the skb was consumed; returns 1 (skb NOT freed here)
 * when option parsing failed.
 */
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned int len)
{
	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	dccp_handle_ackvec_processing(sk, skb);
	dccp_deliver_input_to_ccids(sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}
0381
0382 EXPORT_SYMBOL_GPL(dccp_rcv_established);
0383
/*
 * Client-side handling of a packet received in REQUESTING (Request sent).
 * Return contract (see caller in dccp_rcv_state_process):
 *    0 - skb consumed here;
 *   -1 - handled, caller frees the skb;
 *    1 - error: dccpd_reset_code is set and the caller sends a Reset.
 */
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned int len)
{
	/*
	 * Only a Response is acted upon; any other type falls through the
	 * if-block to out_invalid_packet below.
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		/* The Response must acknowledge our Request: AWL <= ackno <= AWH. */
		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu\n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		if (dccp_parse_options(sk, NULL, skb))
			return 1;

		/*
		 * Take an initial RTT sample from the Request/Response
		 * exchange via Timestamp Echo (the ×10 presumably converts
		 * timestamp units to the RTT scale — confirm vs. options.c).
		 */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));

		/* Stop retransmitting the Request and release its skb. */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		WARN_ON(sk->sk_send_head == NULL);
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		/* Record the peer's initial sequence number: GSR := ISR := P.seqno. */
		dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Activate negotiated feature values; failure aborts setup. */
		if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
			goto unable_to_proceed;

		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}

		/*
		 * Defer the completing Ack when pending writes, ping-pong
		 * mode, or defer_accept mean it can piggy-back on data.
		 */
		if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* Caller will send a Reset with this code. */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;

unable_to_proceed:
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
	/* Feature activation failed: abort the half-set-up connection. */
	dccp_set_state(sk, DCCP_CLOSED);
	sk->sk_err = ECOMM;
	return 1;
}
0516
/*
 * Handle a packet in RESPOND (server) or PARTOPEN (client): both states
 * wait for the acknowledgment that completes the handshake.  Returns 1 if
 * the skb was consumed (handed to __dccp_rcv_established), 0 otherwise.
 */
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		/* No further delayed Acks needed; Reset handled by caller. */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		/* A server in RESPOND does not advance to OPEN on bare Data. */
		if (sk->sk_state == DCCP_RESPOND)
			break;
		fallthrough;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/* PARTOPEN's pending delayed-Ack is no longer required. */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		/* Initial RTT sample from the handshake's Timestamp Echo. */
		if (likely(sample)) {
			long delta = dccp_timestamp() - sample;

			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
		}

		/* Record first acceptable seqno (OSR) and enter OPEN. */
		dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* skb consumed by the established path */
		}
		break;
	}

	return queued;
}
0570
/*
 * Top-level state-machine entry for packets on non-established sockets.
 * Returns 0 when the skb was consumed; returns 1 on error, in which case
 * dcb->dccpd_reset_code may be set for the caller to send a Reset.
 */
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	bool acceptable;
	int queued = 0;

	/*
	 * LISTEN: only a Request may create a connection; Resets are
	 * silently dropped, everything else is answered with a Reset
	 * (No Connection).
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			/* conn_request() may need BHs off and RCU held. */
			rcu_read_lock();
			local_bh_disable();
			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
			local_bh_enable();
			rcu_read_unlock();
			if (!acceptable)
				return 1;
			consume_skb(skb);
			return 0;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	} else if (sk->sk_state == DCCP_CLOSED) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	/* Sequence validation; REQUESTING has no receive windows yet. */
	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
		goto discard;

	/*
	 * Role/state mismatches (server getting a Response, client getting a
	 * Request, Data before RESPOND completed) are answered with a Sync.
	 */
	if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_RESPONSE) ||
	    (dp->dccps_role == DCCP_ROLE_CLIENT &&
	     dh->dccph_type == DCCP_PKT_REQUEST) ||
	    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	}

	if (dccp_parse_options(sk, NULL, skb))
		return 1;

	/* Teardown packets are valid in every remaining state. */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		if (dccp_rcv_closereq(sk, skb))
			return 0;
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		if (dccp_rcv_close(sk, skb))
			return 0;
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
		/* queued: 0/-1/1 per the helper's return contract. */
		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_PARTOPEN:
		/* Only PARTOPEN feeds ack vector/CCIDs before dispatch. */
		dccp_handle_ackvec_processing(sk, skb);
		dccp_deliver_input_to_ccids(sk, skb);
		fallthrough;
	case DCCP_RESPOND:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		/* Leaving PARTOPEN: the socket just became writable. */
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}
0713
0714 EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724 u32 dccp_sample_rtt(struct sock *sk, long delta)
0725 {
0726
0727 delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
0728
0729 if (unlikely(delta <= 0)) {
0730 DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
0731 return DCCP_SANE_RTT_MIN;
0732 }
0733 if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
0734 DCCP_WARN("RTT sample %ld too large, using max\n", delta);
0735 return DCCP_SANE_RTT_MAX;
0736 }
0737
0738 return delta;
0739 }