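/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol: building and transmitting packets.
 */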
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

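/* enqueue @skb on sk_send_head for retransmission, return clone to send now */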
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

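/*
 * All SKBs seen here are completely headerless. It is our job to build the
 * DCCP header and pass the packet down to IP, which adds its own header and
 * hands the packet off to the device.
 */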
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;

		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
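		/*
		 * Increment GSS here already in case the option code needs it.
		 * GSS is updated for real (via dccp_update_gss) only if option
		 * processing below succeeds.
		 */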
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			fallthrough;
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			fallthrough;

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			fallthrough;
		default:
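			/*
			 * Set owner/destructor: some skbs arriving here were
			 * allocated via alloc_skb (e.g. on retransmission) and
			 * do not yet have an owning socket.
			 */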
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

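		/* Build the DCCP header and checksum it. */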
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		dh->dccph_x = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
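			/*
			 * Limit the Ack window to ISS <= P.ackno <= GSS, so
			 * that only Responses to Requests we sent are
			 * considered.
			 */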
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

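/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * @dp: socket to find packet size limits of
 *
 * Only the HC-sender CCID is consulted; the HC-receiver CCID only sends small
 * feedback packets (Acks). A return value of 0 means "no current CCMPS", i.e.
 * the CCID imposes no limit of its own.
 */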
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

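	/*
	 * Leave enough headroom for the DCCP options that may appear on
	 * DCCP-Data packets (table 3 in RFC 4340, 5.8); larger options such as
	 * a full Ack Vector are better carried on a separate Ack. Headroom is
	 * reserved for:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP Count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 * rounded up to a multiple of 4 for option alignment.
	 */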
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);

	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

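/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait on
 * @delay: timeout in jiffies
 *
 * Used by CCIDs which need to delay the send time in process context.
 * Returns the remaining wait time in jiffies, or -1 on signal/socket error.
 */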
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

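/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * @sk: socket to send data packet on
 *
 * Transmits the next-queued payload and informs the CCID that the packet has
 * been sent.
 */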
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
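		/*
		 * See RFC 4340, 8.1.5 - Handshake Completion.
		 *
		 * For robustness, Confirm options are resent until the peer
		 * has entered OPEN; during this phase the usable MPS is
		 * reduced by the space the Change/Confirm options occupy.
		 */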
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
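	/*
	 * Register the packet as sent even if an error occurred: to the remote
	 * end a local drop is indistinguishable from network loss, i.e. any
	 * local drop will eventually be reported via receiver feedback.
	 */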
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

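	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it sets this flag
	 * to schedule a Sync; the Sync picks up all currently pending header
	 * options, clearing the backlog.
	 */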
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

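/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk:          socket to be drained
 * @time_budget: time allowed to drain the queue, in jiffies
 *
 * Packets are queued without waiting to be sent, so the TX queue may be
 * non-empty at the end of a connection. The HC-sender CCID is given a grace
 * period of up to @time_budget jiffies; if this function returns with a
 * non-empty write queue, the remainder is purged later.
 */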
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
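			/*
			 * When the CCID decides the send time itself, the next
			 * sending time is unknown and the CCID may never send
			 * again (e.g. remote host crashes or lost Acks).
			 */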
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

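/*
 * Retransmit the packet stored in sk->sk_send_head. Only packets entailed via
 * dccp_skb_entail() are ever retransmitted here: the Request sent by
 * dccp_connect() and the Close/CloseReq sent on active close. The retransmit
 * counter distinguishes a retransmitted Request from the original one.
 */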
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH;

	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb;

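	/*
	 * sk is marked const to make clear that the listening socket lock is
	 * not held here; sock_wmalloc() only changes sk->sk_wmem_alloc
	 * atomically, so it is safe to cast away the const.
	 */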
	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
			   GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_DCCP_HEADER);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = htons(inet_rsk(req)->ir_num);
	dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

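/* answer an offending packet in @rcv_skb with a Reset built on control socket @sk */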
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
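	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0 (or any other sensible value).
	 */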
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

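/* send Reset on established socket, to close or abort the connection */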
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

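/*
 * Do all connect socket setups that can be done AF independent.
 */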
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

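/* Delayed-Ack scheduling modelled on TCP; currently compiled out and unused. */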
#if 0
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	unsigned long timeout = jiffies + 2 * HZ;

	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

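	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */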
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

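/*
 * Send a Close or CloseReq packet. When @active is set, the packet is also
 * entailed on sk_send_head so it can be retransmitted until the closing state
 * is left.
 */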
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
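		/*
		 * Retransmission timer for active close: RFC 4340, 8.3 requires
		 * the Close/CloseReq to be retransmitted until the closing
		 * state can be left. A conservative initial timeout is used
		 * here, since RTT measurement is done by the CCIDs and no easy
		 * RTT sample is available at this point.
		 */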
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}