/*
 * net/tipc/link.c: TIPC link code
 */
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
        u32 sent_pkts;
        u32 recv_pkts;
        u32 sent_states;
        u32 recv_states;
        u32 sent_probes;
        u32 recv_probes;
        u32 sent_nacks;
        u32 recv_nacks;
        u32 sent_acks;
        u32 sent_bundled;
        u32 sent_bundles;
        u32 recv_bundled;
        u32 recv_bundles;
        u32 retransmitted;
        u32 sent_fragmented;
        u32 sent_fragments;
        u32 recv_fragmented;
        u32 recv_fragments;
        u32 link_congs;            /* # port sends blocked by congestion */
        u32 deferred_recv;         /* # rcv'd out of sequence, deferred */
        u32 duplicates;            /* # rcv'd duplicates, discarded */
        u32 max_queue_sz;          /* send queue size high water mark */
        u32 accu_queue_sz;         /* used for send queue size profiling */
        u32 queue_sz_counts;       /* used for send queue size profiling */
        u32 msg_length_counts;     /* used for message length profiling */
        u32 msg_lengths_total;     /* used for message length profiling */
        u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @session: session # being used by this end of link
 * @snd_nxt_state: next send seq number for link protocol (STATE) messages
 * @rcv_nxt_state: next rcv seq number for link protocol (STATE) messages
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @in_session: have we received an ACTIVATE message confirming the session
 * @active: link is active link of its parallel pair
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @if_name: associated interface name
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @rst_cnt: number of attempted resets
 * @drop_point: seq number below which arriving tunneled packets are dropped
 * @failover_reasm_skb: saved failover msg ptr for later handling
 * @failover_deferdq: deferred message queue for failover packets
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @backlog: lengths and limits of the backlog queue, per importance level
 * @snd_nxt: next sequence number to use for outbound messages
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @deferdq: deferred receive queue for out-of-sequence packets
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table update messages to be delivered up
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @window: current send window
 * @min_win: minimal send window to be used by link
 * @ssthresh: slow start threshold for congestion handling
 * @max_win: maximal send window to be used by link
 * @cong_acks: congestion acks for congestion avoidance
 * @checkpoint: seq number for congestion window size handling
 * @reasm_buf: head of reassembly queue
 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @last_gap: last gap ack blocks for bcast
 * @last_ga: ptr to gap ack blocks
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: bcast nack state
 * @bc_peer_is_up: peer has acked the bcast init msg
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct net *net;

        /* Management and link supervision data */
        u16 peer_session;
        u16 session;
        u16 snd_nxt_state;
        u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
        bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
        struct tipc_mon_state mon_state;
        u16 rst_cnt;

        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;
        struct sk_buff_head failover_deferdq;

        /* Max packet negotiation */
        u16 mtu;
        u16 advertised_mtu;

        /* Sending */
        struct sk_buff_head transmq;
        struct sk_buff_head backlogq;
        struct {
                u16 len;
                u16 limit;
                struct sk_buff *target_bskb;
        } backlog[5];
        u16 snd_nxt;

        /* Reception */
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head *inputq;
        struct sk_buff_head *namedq;

        /* Congestion handling */
        struct sk_buff_head wakeupq;
        u16 window;
        u16 min_win;
        u16 ssthresh;
        u16 max_win;
        u16 cong_acks;
        u16 checkpoint;

        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;
        struct sk_buff *reasm_tnlmsg;

        /* Broadcast */
        u16 ackers;
        u16 acked;
        u16 last_gap;
        struct tipc_gap_ack_blks *last_ga;
        struct tipc_link *bc_rcvlink;
        struct tipc_link *bc_sndlink;
        u8 nack_state;
        bool bc_peer_is_up;

        /* Statistics */
        struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
        BC_NACK_SND_CONDITIONAL,
        BC_NACK_SND_UNCONDITIONAL,
        BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
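
/* Per-packet retransmit pacing, used by tipc_link_set_skb_retransmit_time():
 * a broadcast packet is not retransmitted more often than once per 10 ms,
 * a unicast packet not more often than once per 1 ms. E.g. after a unicast
 * (re)transmission:
 *
 *     TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
 *
 * and tipc_link_advance_transmq() skips the packet for as long as
 * time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr) holds.
 */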

/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED     = 0xe,
        LINK_ESTABLISHING    = 0xe  << 4,
        LINK_RESET           = 0x1  << 8,
        LINK_RESETTING       = 0x2  << 12,
        LINK_PEER_RESET      = 0xd  << 16,
        LINK_FAILINGOVER     = 0xf  << 20,
        LINK_SYNCHING        = 0xc  << 24
};
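
/* Note on the encoding above: each state occupies its own 4-bit field, so
 * any set of states can be tested with a single mask operation. E.g.
 * link_is_up() below reduces to one AND:
 *
 *     l->state & (LINK_ESTABLISHED | LINK_SYNCHING)
 *
 * which is non-zero exactly for the two "up" states.
 */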

static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
                                    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
                                     u16 acked, u16 gap,
                                     struct tipc_gap_ack_blks *ga,
                                     struct sk_buff_head *xmitq,
                                     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
                                  bool retransmitted);

/*
 *  Simple link routines
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
        return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
        return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
        return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
        l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
        return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
        return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
        return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
        return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
        return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
        return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
        return l->net_plane;
}

struct net *tipc_link_net(struct tipc_link *l)
{
        return l->net;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
        l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
        struct tipc_link *rcv_l = uc_l->bc_rcvlink;

        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
        snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
        u16 ack = snd_l->snd_nxt - 1;

        snd_l->ackers--;
        rcv_l->bc_peer_is_up = true;
        rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
        trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
                tipc_link_reset(snd_l);
                snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
}

int tipc_link_bc_peers(struct tipc_link *l)
{
        return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 gap = 0;

        if (more(l->snd_nxt, l->rcv_nxt))
                gap = l->snd_nxt - l->rcv_nxt;
        if (skb)
                gap = buf_seqno(skb) - l->rcv_nxt;
        return gap;
}
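
/* Note: the broadcast receive gap is measured from rcv_nxt up to either the
 * peer's snd_nxt (when nothing is deferred) or up to the first out-of-order
 * packet sitting in the deferred queue, whichever information is available.
 */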

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
        l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
        return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
        return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
        return l->mtu - INT_H_SIZE;
#endif
}
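
/* The link MSS is the payload budget per packet used for message bundling
 * (cf. tipc_link_xmit()): the link MTU less the internal header, and, when
 * TIPC encryption is compiled in, less the encryption envelope overhead
 * (EMSG_OVERHEAD) that will be added to every packet on the wire.
 */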

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
        return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
        return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
        return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
        return l->state;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @self: local unicast link id
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing capabilities of peer node
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      u32 min_win, u32 max_win, u32 session, u32 self,
                      u32 peer, u8 *peer_id, u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        char peer_str[NODE_ID_STR_LEN] = {0,};
        char self_str[NODE_ID_STR_LEN] = {0,};
        struct tipc_link *l;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
        l->session = session;

        if (peer_id) {
                tipc_nodeid2string(self_str, tipc_own_id(net));
                if (strlen(self_str) > 16)
                        sprintf(self_str, "%x", self);
                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
        }
        /* Peer i/f name will be completed by reset/activate message */
        snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
                 self_str, if_name, peer_str);

        strcpy(l->if_name, if_name);
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        if (bc_rcvlink)
                bc_rcvlink->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
        l->priority = priority;
        tipc_link_set_queue_limits(l, min_win, max_win);
        l->ackers = 1;
        l->bc_sndlink = bc_sndlink;
        l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        __skb_queue_head_init(&l->failover_deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @peer_caps: bitmap describing capabilities of peer node
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
                         int mtu, u32 min_win, u32 max_win, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
        struct tipc_link *l;

        if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
                              max_win, 0, ownnode, peer, NULL, peer_caps,
                              bc_sndlink, NULL, inputq, namedq, link))
                return false;

        l = *link;
        if (peer_id) {
                char peer_str[NODE_ID_STR_LEN] = {0,};

                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
                /* Broadcast receiver link name: "broadcast-link:<peer>" */
                snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
                         peer_str);
        } else {
                strcpy(l->name, tipc_bclink_name);
        }
        trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
        tipc_link_reset(l);
        l->state = LINK_RESET;
        l->ackers = 0;
        l->bc_rcvlink = l;

        /* Broadcast send link is always up */
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;

        /* Disable replicast if even a single peer doesn't support it */
        if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
                tipc_bcast_toggle_rcast(net, false);

        return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;
        int old_state = l->state;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_inner_hdr(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
        return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = 0;
        int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
        struct tipc_mon_state *mstate = &l->mon_state;

        trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
        trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                mtyp = STATE_MSG;
                link_profile_stats(l);
                tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
                if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                state = bc_acked != bc_snt;
                state |= l->bc_rcvlink->rcv_unacked;
                state |= l->rcv_unacked;
                state |= !skb_queue_empty(&l->transmq);
                probe = mstate->probing;
                probe |= l->silent_intv_cnt;
                if (probe || mstate->monitoring)
                        l->silent_intv_cnt++;
                probe |= !skb_queue_empty(&l->deferdq);
                if (l->snd_nxt == l->checkpoint) {
                        tipc_link_update_cwin(l, 0, 0);
                        probe = true;
                }
                l->checkpoint = l->snd_nxt;
                break;
        case LINK_RESET:
                setup = l->rst_cnt++ <= 4;
                setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (state || probe || setup)
                tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

        return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
        u32 dnode = tipc_own_addr(l->net);
        u32 dport = msg_origport(hdr);
        struct sk_buff *skb;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
        l->stats.link_congs++;
        trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
        return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
        struct sk_buff_head *wakeupq = &l->wakeupq;
        struct sk_buff_head *inputq = l->inputq;
        struct sk_buff *skb, *tmp;
        struct sk_buff_head tmpq;
        int avail[5] = {0,};
        int imp = 0;

        __skb_queue_head_init(&tmpq);

        for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
                avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

        skb_queue_walk_safe(wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                if (avail[imp] <= 0)
                        continue;
                avail[imp]--;
                __skb_unlink(skb, wakeupq);
                __skb_queue_tail(&tmpq, skb);
        }

        spin_lock_bh(&inputq->lock);
        skb_queue_splice_tail(&tmpq, inputq);
        spin_unlock_bh(&inputq->lock);
}
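
/* Wakeup credit per importance level: a waiting sender is woken only if its
 * priority band still has backlog room, i.e.
 *
 *     avail[imp] = backlog[imp].limit - backlog[imp].len
 *
 * and the wakeupq is drained in FIFO order within those credits, so lower
 * backlog pressure wakes more senders at once.
 */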

/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
                                              struct tipc_link *l)
{
        if (link_is_bc_sndlink(l))
                TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
        else
                TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}

void tipc_link_reset(struct tipc_link *l)
{
        struct sk_buff_head list;
        u32 imp;

        __skb_queue_head_init(&list);

        l->in_session = false;
        /* Force re-synch of peer session number before establishing */
        l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;

        spin_lock_bh(&l->wakeupq.lock);
        skb_queue_splice_init(&l->wakeupq, &list);
        spin_unlock_bh(&l->wakeupq.lock);

        spin_lock_bh(&l->inputq->lock);
        skb_queue_splice_init(&list, l->inputq);
        spin_unlock_bh(&l->inputq->lock);

        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        __skb_queue_purge(&l->failover_deferdq);
        for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
                l->backlog[imp].len = 0;
                l->backlog[imp].target_bskb = NULL;
        }
        kfree_skb(l->reasm_buf);
        kfree_skb(l->reasm_tnlmsg);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->reasm_tnlmsg = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->snd_nxt_state = 1;
        l->rcv_nxt_state = 1;
        l->acked = 0;
        l->last_gap = 0;
        kfree(l->last_ga);
        l->last_ga = NULL;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff *skb, *_skb;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        int pkt_cnt = skb_queue_len(list);
        unsigned int mss = tipc_link_mss(l);
        unsigned int cwin = l->window;
        unsigned int mtu = l->mtu;
        struct tipc_msg *hdr;
        bool new_bundle;
        int rc = 0;
        int imp;

        if (pkt_cnt <= 0)
                return 0;

        hdr = buf_msg(skb_peek(list));
        if (unlikely(msg_size(hdr) > mtu)) {
                pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
                        skb_queue_len(list), msg_user(hdr),
                        msg_type(hdr), msg_size(hdr), mtu);
                __skb_queue_purge(list);
                return -EMSGSIZE;
        }

        imp = msg_importance(hdr);
        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
                        pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
                        return -ENOBUFS;
                }
                rc = link_schedule_user(l, hdr);
        }

        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
        }

        /* Prepare each packet for sending, and add to relevant queue: */
        while ((skb = __skb_dequeue(list))) {
                if (likely(skb_queue_len(transmq) < cwin)) {
                        hdr = buf_msg(skb);
                        msg_set_seqno(hdr, seqno);
                        msg_set_ack(hdr, ack);
                        msg_set_bcast_ack(hdr, bc_ack);
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
                                kfree_skb(skb);
                                __skb_queue_purge(list);
                                return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
                        tipc_link_set_skb_retransmit_time(skb, l);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
                if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
                                        mss, l->addr, &new_bundle)) {
                        if (skb) {
                                /* Keep a ref. to the skb for next messages */
                                l->backlog[imp].target_bskb = skb;
                                l->backlog[imp].len++;
                                __skb_queue_tail(backlogq, skb);
                        } else {
                                if (new_bundle) {
                                        l->stats.sent_bundles++;
                                        l->stats.sent_bundled++;
                                }
                                l->stats.sent_bundled++;
                        }
                        continue;
                }
                l->backlog[imp].target_bskb = NULL;
                l->backlog[imp].len += (1 + skb_queue_len(list));
                __skb_queue_tail(backlogq, skb);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return rc;
}
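
/* Typical calling pattern (a sketch; cf. tipc_node_xmit()): the caller
 * builds the message chain, calls tipc_link_xmit() with the link protected
 * by the node lock, then drains @xmitq to the bearer outside this layer:
 *
 *     struct sk_buff_head xmitq;
 *
 *     __skb_queue_head_init(&xmitq);
 *     rc = tipc_link_xmit(l, &list, &xmitq);
 *     // ...unlock, then hand xmitq to the bearer (tipc_bearer_xmit())
 *
 * Keeping actual transmission out of this function keeps the link code
 * free of bearer and locking concerns.
 */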

static void tipc_link_update_cwin(struct tipc_link *l, int released,
                                  bool retransmitted)
{
        int bklog_len = skb_queue_len(&l->backlogq);
        struct sk_buff_head *txq = &l->transmq;
        int txq_len = skb_queue_len(txq);
        u16 cwin = l->window;

        /* Enter fast recovery */
        if (unlikely(retransmitted)) {
                l->ssthresh = max_t(u16, l->window / 2, 300);
                l->window = min_t(u16, l->ssthresh, l->window);
                return;
        }
        /* Enter slow start */
        if (unlikely(!released)) {
                l->ssthresh = max_t(u16, l->window / 2, 300);
                l->window = l->min_win;
                return;
        }
        /* Don't increase window if no pressure on the transmit queue */
        if (txq_len + bklog_len < cwin)
                return;

        /* Don't increase window if there are holes in the transmit queue */
        if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
                return;

        l->cong_acks += released;

        /* Slow start */
        if (cwin <= l->ssthresh) {
                l->window = min_t(u16, cwin + released, l->max_win);
                return;
        }
        /* Congestion avoidance */
        if (l->cong_acks < cwin)
                return;
        l->window = min_t(u16, ++cwin, l->max_win);
        l->cong_acks = 0;
}
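
/* The window handling above mirrors classic TCP congestion control: while
 * cwin <= ssthresh the window grows by one per released packet (slow
 * start), above it by one per full window of acks (congestion avoidance),
 * a retransmission halves ssthresh and caps the window (fast recovery),
 * and an ack releasing nothing collapses the window back to min_win.
 */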

static void tipc_link_advance_backlog(struct tipc_link *l,
                                      struct sk_buff_head *xmitq)
{
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *txq = &l->transmq;
        struct sk_buff *skb, *_skb;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        struct tipc_msg *hdr;
        u16 cwin = l->window;
        u32 imp;

        while (skb_queue_len(txq) < cwin) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                imp = msg_importance(hdr);
                l->backlog[imp].len--;
                if (unlikely(skb == l->backlog[imp].target_bskb))
                        l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
                tipc_link_set_skb_retransmit_time(skb, l);
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                l->stats.sent_pkts++;
                seqno++;
        }
        l->snd_nxt = seqno;
}

/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if the repeated retransmit failures happens, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
                                    int *rc)
{
        struct sk_buff *skb = skb_peek(&l->transmq);
        struct tipc_msg *hdr;

        if (!skb)
                return false;

        if (!TIPC_SKB_CB(skb)->retr_cnt)
                return false;

        if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
                        msecs_to_jiffies(r->tolerance * 10)))
                return false;

        hdr = buf_msg(skb);
        if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
                return false;

        pr_warn("Retransmission failure on link <%s>\n", l->name);
        link_print(l, "State of link ");
        pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
        pr_info("sqno %u, prev: %x, dest: %x\n",
                msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
        pr_info("retr_stamp %d, retr_cnt %d\n",
                jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
                TIPC_SKB_CB(skb)->retr_cnt);

        trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
        trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
        trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

        if (link_is_bc_sndlink(l)) {
                r->state = LINK_RESET;
                *rc |= TIPC_LINK_DOWN_EVT;
        } else {
                *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }

        return true;
}

/* tipc_data_input - deliver data and name distributor messages to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
        struct tipc_msg *hdr = buf_msg(skb);

        switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
                if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
                fallthrough;
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case GROUP_PROTOCOL:
                skb_queue_tail(mc_inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                l->bc_rcvlink->state = LINK_ESTABLISHED;
                skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
#ifdef CONFIG_TIPC_CRYPTO
        case MSG_CRYPTO:
                if (sysctl_tipc_key_exchange_enabled &&
                    TIPC_SKB_CB(skb)->decrypted) {
                        tipc_crypto_msg_rcv(l->net, skb);
                        return true;
                }
                fallthrough;
#endif
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return true;
        }
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq,
                           struct sk_buff **reasm_skb)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int pos = 0;

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
                        pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_bcast_lock(l->net);
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }

        kfree_skb(skb);
        return 0;
}

/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *                       inner message along with the ones in the old link's
 *                       deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
                             struct sk_buff_head *inputq)
{
        struct sk_buff **reasm_skb = &l->failover_reasm_skb;
        struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
        struct sk_buff_head *fdefq = &l->failover_deferdq;
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        int ipos = 0;
        int rc = 0;
        u16 seqno;

        if (msg_type(hdr) == SYNCH_MSG) {
                kfree_skb(skb);
                return 0;
        }

        /* Not a fragment? */
        if (likely(!msg_nof_fragms(hdr))) {
                if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
                        pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
                                            skb_queue_len(fdefq));
                        return 0;
                }
                kfree_skb(skb);
        } else {
                /* Set fragment type for buf_append */
                if (msg_fragm_no(hdr) == 1)
                        msg_set_type(hdr, FIRST_FRAGMENT);
                else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
                        msg_set_type(hdr, FRAGMENT);
                else
                        msg_set_type(hdr, LAST_FRAGMENT);

                if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
                        /* Successful but non-complete reassembly? */
                        if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
                                return 0;
                        pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                iskb = skb;
        }

        do {
                seqno = buf_seqno(iskb);
                if (unlikely(less(seqno, l->drop_point))) {
                        kfree_skb(iskb);
                        continue;
                }
                if (unlikely(seqno != l->drop_point)) {
                        __tipc_skb_queue_sorted(fdefq, seqno, iskb);
                        continue;
                }

                l->drop_point++;
                if (!tipc_data_input(l, iskb, inputq))
                        rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
                if (unlikely(rc))
                        break;
        } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

        return rc;
}

/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
                          struct tipc_msg *hdr, bool uc)
{
        struct tipc_gap_ack_blks *p;
        u16 sz = 0;

        /* Does peer support the Gap ACK blocks feature? */
        if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
                p = (struct tipc_gap_ack_blks *)msg_data(hdr);
                sz = ntohs(p->len);
                /* Sanity check */
                if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
                        /* Good, check if the desired type exists */
                        if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
                                goto ok;
                /* Backward compatible: peer might not support bc, but uc? */
                } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
                        if (p->ugack_cnt) {
                                p->bgack_cnt = 0;
                                goto ok;
                        }
                }
        }
        /* Other cases: ignore! */
        p = NULL;

ok:
        *ga = p;
        return sz;
}

static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
                                    struct tipc_link *l, u8 start_index)
{
        struct tipc_gap_ack *gacks = &ga->gacks[start_index];
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 expect, seqno = 0;
        u8 n = 0;

        if (!skb)
                return 0;

        expect = buf_seqno(skb);
        skb_queue_walk(&l->deferdq, skb) {
                seqno = buf_seqno(skb);
                if (unlikely(more(seqno, expect))) {
                        gacks[n].ack = htons(expect - 1);
                        gacks[n].gap = htons(seqno - expect);
                        if (++n >= MAX_GAP_ACK_BLKS / 2) {
                                pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
                                                    l->name, n,
                                                    skb_queue_len(&l->deferdq));
                                return n;
                        }
                } else if (unlikely(less(seqno, expect))) {
                        pr_warn("Unexpected skb in deferdq!\n");
                        continue;
                }
                expect = seqno + 1;
        }

        /* The last block: close it with the highest deferred seqno */
        gacks[n].ack = htons(seqno);
        gacks[n].gap = 0;
        n++;
        return n;
}

/**
 * tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast and broadcast
 * receiver links of a certain peer; the buffer after being built has the
 * network data format found at the struct tipc_gap_ack_blks definition.
 *
 * Return: the actual allocated memory size
 */
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
        struct tipc_link *bcl = l->bc_rcvlink;
        struct tipc_gap_ack_blks *ga;
        u16 len;

        ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

        /* Start with broadcast link first */
        tipc_bcast_lock(bcl->net);
        msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
        msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
        ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
        tipc_bcast_unlock(bcl->net);

        /* Then the unicast link, but only when an explicit NACK is needed */
        ga->ugack_cnt = (msg_seq_gap(hdr)) ?
                        __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

        /* Total len */
        len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
        ga->len = htons(len);
        return len;
}
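
/* Worked example of the block encoding: with rcv_nxt = 3 and the deferdq
 * holding seqnos {5, 6, 9}, __tipc_build_gap_ack_blks() emits
 *
 *     gacks[0] = { .ack = htons(6), .gap = htons(2) }   // 7..8 missing
 *     gacks[1] = { .ack = htons(9), .gap = 0 }          // last block
 *
 * while the gap in front of the first deferred packet (3..4 here) is
 * carried separately in the seq_gap/bc_gap fields of the STATE message.
 * On the wire, the broadcast blocks come first, then the unicast ones.
 */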

/**
 * tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *                             acked packets, also doing retransmissions if
 *                             gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if repeated retransmit failures
 *      happen (unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
                                     u16 acked, u16 gap,
                                     struct tipc_gap_ack_blks *ga,
                                     struct sk_buff_head *xmitq,
                                     bool *retransmitted, int *rc)
{
        struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
        struct tipc_gap_ack *gacks = NULL;
        struct sk_buff *skb, *_skb, *tmp;
        struct tipc_msg *hdr;
        u32 qlen = skb_queue_len(&l->transmq);
        u16 nacked = acked, ngap = gap, gack_cnt = 0;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno, n = 0;
        u16 end = r->acked, start = end, offset = r->last_gap;
        u16 si = (last_ga) ? last_ga->start_index : 0;
        bool is_uc = !link_is_bc_sndlink(l);
        bool bc_has_acked = false;

        trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

        /* Determine Gap ACK blocks if any for the particular link */
        if (ga && is_uc) {
                /* Get the Gap ACKs, uc part */
                gack_cnt = ga->ugack_cnt;
                gacks = &ga->gacks[ga->bgack_cnt];
        } else if (ga) {
                /* Copy the Gap ACKs, bc part, for later renewal if needed */
                this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
                                  GFP_ATOMIC);
                if (likely(this_ga)) {
                        this_ga->start_index = 0;
                        /* Start with the bc Gap ACKs */
                        gack_cnt = this_ga->bgack_cnt;
                        gacks = &this_ga->gacks[0];
                } else {
                        /* Hmm, we can get in trouble..., simply ignore it */
                        pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
                }
        }

        /* Advance the link transmq */
        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                seqno = buf_seqno(skb);

next_gap_ack:
                if (less_eq(seqno, nacked)) {
                        if (is_uc)
                                goto release;
                        /* Skip packets peer has already acked */
                        if (!more(seqno, r->acked))
                                continue;
                        /* Get the next of last Gap ACK blocks */
                        while (more(seqno, end)) {
                                if (!last_ga || si >= last_ga->bgack_cnt)
                                        break;
                                start = end + offset + 1;
                                end = ntohs(last_ga->gacks[si].ack);
                                offset = ntohs(last_ga->gacks[si].gap);
                                si++;
                                WARN_ONCE(more(start, end) ||
                                          (!offset &&
                                           si < last_ga->bgack_cnt) ||
                                          si > MAX_GAP_ACK_BLKS,
                                          "Corrupted Gap ACK: %d %d %d %d %d\n",
                                          start, end, offset, si,
                                          last_ga->bgack_cnt);
                        }
                        /* Check against the last Gap ACK block */
                        if (in_range(seqno, start, end))
                                continue;
                        /* Update/release the packet peer is acking */
                        bc_has_acked = true;
                        if (--TIPC_SKB_CB(skb)->ackers)
                                continue;
release:
                        /* release skb */
                        __skb_unlink(skb, &l->transmq);
                        kfree_skb(skb);
                } else if (less_eq(seqno, nacked + ngap)) {
                        /* First gap: check for repeated retrans failures */
                        if (unlikely(seqno == acked + 1 &&
                                     link_retransmit_failure(l, r, rc))) {
                                /* Ignore this bc Gap ACKs if any */
                                kfree(this_ga);
                                this_ga = NULL;
                                break;
                        }
                        /* retransmit skb if unrestricted */
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
                        tipc_link_set_skb_retransmit_time(skb, l);
                        _skb = pskb_copy(skb, GFP_ATOMIC);
                        if (!_skb)
                                continue;
                        hdr = buf_msg(_skb);
                        msg_set_ack(hdr, ack);
                        msg_set_bcast_ack(hdr, bc_ack);
                        _skb->priority = TC_PRIO_CONTROL;
                        __skb_queue_tail(xmitq, _skb);
                        l->stats.retransmitted++;
                        if (!is_uc)
                                r->stats.retransmitted++;
                        *retransmitted = true;
                        /* Increase actual retrans counter & mark first time */
                        if (!TIPC_SKB_CB(skb)->retr_cnt++)
                                TIPC_SKB_CB(skb)->retr_stamp = jiffies;
                } else {
                        /* retry with Gap ACK blocks if any */
                        if (n >= gack_cnt)
                                break;
                        nacked = ntohs(gacks[n].ack);
                        ngap = ntohs(gacks[n].gap);
                        n++;
                        goto next_gap_ack;
                }
        }

        /* Renew last Gap ACK blocks for bc if needed */
        if (bc_has_acked) {
                if (this_ga) {
                        kfree(last_ga);
                        r->last_ga = this_ga;
                        r->last_gap = gap;
                } else if (last_ga) {
                        if (less(acked, start)) {
                                si--;
                                offset = start - acked - 1;
                        } else if (less(acked, end)) {
                                acked = end;
                        }
                        if (si < last_ga->bgack_cnt) {
                                last_ga->start_index = si;
                                r->last_gap = offset;
                        } else {
                                kfree(last_ga);
                                r->last_ga = NULL;
                                r->last_gap = 0;
                        }
                } else {
                        r->last_gap = 0;
                }
                r->acked = acked;
        } else {
                kfree(this_ga);
        }

        return qlen - skb_queue_len(&l->transmq);
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        if (!l)
                return 0;

        /* Broadcast ACK must be sent via a unicast link => defer to caller */
        if (link_is_bc_rcvlink(l)) {
                if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
                        return 0;
                l->rcv_unacked = 0;

                /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
                l->snd_nxt = l->rcv_nxt;
                return TIPC_LINK_SND_STATE;
        }
        /* Unicast ACK */
        l->rcv_unacked = 0;
        l->stats.sent_acks++;
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}
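
/* The xor test above staggers broadcast acks across the cluster: a node
 * acks only when (rcv_nxt ^ own_addr) ends in 0xf, i.e. each node acks
 * roughly one packet in sixteen, at packet numbers keyed to its own
 * address, so the sender is not flooded by simultaneous acks from all
 * of its peers.
 */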

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = RESET_MSG;
        struct sk_buff *skb;

        if (l->state == LINK_ESTABLISHING)
                mtyp = ACTIVATE_MSG;

        tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

        /* Inform peer that this endpoint is going down if applicable */
        skb = skb_peek_tail(xmitq);
        if (skb && (l->state == LINK_RESET))
                msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 def_cnt = ++l->stats.deferred_recv;
        struct sk_buff_head *dfq = &l->deferdq;
        u32 defq_len = skb_queue_len(dfq);
        int match1, match2;

        if (link_is_bc_rcvlink(l)) {
                match1 = def_cnt & 0xf;
                match2 = tipc_own_addr(l->net) & 0xf;
                if (match1 == match2)
                        return TIPC_LINK_SND_STATE;
                return 0;
        }

        if (defq_len >= 3 && !((defq_len - 3) % 16)) {
                u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
                                          rcvgap, 0, 0, xmitq);
        }
        return 0;
}
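
/* NACK rate limiting: on a unicast link a NACK goes out when the deferred
 * queue first reaches 3 packets, and then only at every 16th new deferral
 * (defq_len 3, 19, 35, ...). On the broadcast receive link only one
 * deferral in sixteen triggers a STATE message, at counter values keyed
 * to the node's own address, staggering NACKs across receivers.
 */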

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
{
        struct sk_buff_head *defq = &l->deferdq;
        struct tipc_msg *hdr = buf_msg(skb);
        u16 seqno, rcv_nxt, win_lim;
        int released = 0;
        int rc = 0;

        /* Verify and update link state */
        if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
                return tipc_link_proto_rcv(l, skb, xmitq);

        /* Don't send probe at next timeout expiration */
        l->silent_intv_cnt = 0;

        do {
                hdr = buf_msg(skb);
                seqno = msg_seqno(hdr);
                rcv_nxt = l->rcv_nxt;
                win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

                if (unlikely(!link_is_up(l))) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        kfree_skb(skb);
                        break;
                }

                /* Drop if outside receive window */
                if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
                        l->stats.duplicates++;
                        kfree_skb(skb);
                        break;
                }
                released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
                                                      NULL, NULL, NULL, NULL);

                /* Defer delivery if sequence gap */
                if (unlikely(seqno != rcv_nxt)) {
                        if (!__tipc_skb_queue_sorted(defq, seqno, skb))
                                l->stats.duplicates++;
                        rc |= tipc_link_build_nack_msg(l, xmitq);
                        break;
                }

                /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_pkts++;

                if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
                        rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
                else if (!tipc_data_input(l, skb, l->inputq))
                        rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
                        rc |= tipc_link_build_state_msg(l, xmitq);
                if (unlikely(rc & ~TIPC_LINK_SND_STATE))
                        break;
        } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

        /* Forward queues and wake up waiting users */
        if (released) {
                tipc_link_update_cwin(l, released, 0);
                tipc_link_advance_backlog(l, xmitq);
                if (unlikely(!skb_queue_empty(&l->wakeupq)))
                        link_prepare_wakeup(l);
        }
        return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
{
        struct tipc_mon_state *mstate = &l->mon_state;
        struct sk_buff_head *dfq = &l->deferdq;
        struct tipc_link *bcl = l->bc_rcvlink;
        struct tipc_msg *hdr;
        struct sk_buff *skb;
        bool node_up = link_is_up(bcl);
        u16 glen = 0, bc_rcvgap = 0;
        int dlen = 0;
        void *data;

        /* Don't send protocol message during reset or link failover */
        if (tipc_link_is_blocked(l))
                return;

        if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
                return;

        if ((probe || probe_reply) && !skb_queue_empty(dfq))
                rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

        skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
                              tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
                              l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return;

        hdr = buf_msg(skb);
        data = msg_data(hdr);
        msg_set_session(hdr, l->session);
        msg_set_bearer_id(hdr, l->bearer_id);
        msg_set_net_plane(hdr, l->net_plane);
        msg_set_next_sent(hdr, l->snd_nxt);
        msg_set_ack(hdr, l->rcv_nxt - 1);
        msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
        msg_set_bc_ack_invalid(hdr, !node_up);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_link_tolerance(hdr, tolerance);
        msg_set_linkprio(hdr, priority);
        msg_set_redundant_link(hdr, node_up);
        msg_set_seq_gap(hdr, 0);
        msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

        if (mtyp == STATE_MSG) {
                if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
                        msg_set_seqno(hdr, l->snd_nxt_state++);
                msg_set_seq_gap(hdr, rcvgap);
                bc_rcvgap = link_bc_rcv_gap(bcl);
                msg_set_bc_gap(hdr, bc_rcvgap);
                msg_set_probe(hdr, probe);
                msg_set_is_keepalive(hdr, probe || probe_reply);
                if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
                        glen = tipc_build_gap_ack_blks(l, hdr);
                tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
                msg_set_size(hdr, INT_H_SIZE + glen + dlen);
                skb_trim(skb, INT_H_SIZE + glen + dlen);
                l->stats.sent_states++;
                l->rcv_unacked = 0;
        } else {
                /* RESET_MSG or ACTIVATE_MSG */
                if (mtyp == ACTIVATE_MSG) {
                        msg_set_dest_session_valid(hdr, 1);
                        msg_set_dest_session(hdr, l->peer_session);
                }
                msg_set_max_pkt(hdr, l->advertised_mtu);
                strcpy(data, l->if_name);
                msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
                skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
        }
        if (probe)
                l->stats.sent_probes++;
        if (rcvgap)
                l->stats.sent_nacks++;
        if (bc_rcvgap)
                bcl->stats.sent_nacks++;
        skb->priority = TC_PRIO_CONTROL;
        __skb_queue_tail(xmitq, skb);
        trace_tipc_proto_build(skb, false, l->name);
}

void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 onode = tipc_own_addr(l->net);
        struct tipc_msg *hdr, *ihdr;
        struct sk_buff_head tnlq;
        struct sk_buff *skb;
        u32 dnode = l->addr;

        __skb_queue_head_init(&tnlq);
        skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
                              INT_H_SIZE, BASIC_H_SIZE,
                              dnode, onode, 0, 0, 0);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }

        hdr = buf_msg(skb);
        msg_set_msgcnt(hdr, 1);
        msg_set_bearer_id(hdr, l->peer_bearer_id);

        ihdr = (struct tipc_msg *)msg_data(hdr);
        tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                      BASIC_H_SIZE, dnode);
        msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
        __skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, xmitq);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
{
        struct sk_buff_head *fdefq = &tnl->failover_deferdq;
        struct sk_buff *skb, *tnlskb;
        struct tipc_msg *hdr, tnlhdr;
        struct sk_buff_head *queue = &l->transmq;
        struct sk_buff_head tmpxq, tnlq, frags;
        u16 pktlen, pktcnt, seqno = l->snd_nxt;
        bool pktcnt_need_update = false;
        u16 syncpt;
        int rc;

        if (!tnl)
                return;

        __skb_queue_head_init(&tnlq);

        /* Link Synching:
         * From now on, send only one single ("dummy") SYNCH message
         * to peer. The SYNCH message does not contain any data, just
         * a header conveying the synch point to the peer.
         */
        if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
                tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
                                         INT_H_SIZE, 0, l->addr,
                                         tipc_own_addr(l->net),
                                         0, 0, 0);
                if (!tnlskb) {
                        pr_warn("%sunable to create dummy SYNCH_MSG\n",
                                link_co_err);
                        return;
                }

                hdr = buf_msg(tnlskb);
                syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
                msg_set_syncpt(hdr, syncpt);
                msg_set_bearer_id(hdr, l->peer_bearer_id);
                __skb_queue_tail(&tnlq, tnlskb);
                tipc_link_xmit(tnl, &tnlq, xmitq);
                return;
        }

        __skb_queue_head_init(&tmpxq);
        __skb_queue_head_init(&frags);
        /* At least one packet required for safe algorithm => add dummy */
        skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                              BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
                              0, 0, TIPC_ERR_NO_PORT);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }
        __skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, &tmpxq);
        __skb_queue_purge(&tmpxq);

        /* Initialize reusable tunnel packet header */
        tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
                      mtyp, INT_H_SIZE, l->addr);
        if (mtyp == SYNCH_MSG)
                pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
        else
                pktcnt = skb_queue_len(&l->transmq);
        pktcnt += skb_queue_len(&l->backlogq);
        msg_set_msgcnt(&tnlhdr, pktcnt);
        msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
        /* Wrap each packet into a tunnel packet */
        skb_queue_walk(queue, skb) {
                hdr = buf_msg(skb);
                if (queue == &l->backlogq)
                        msg_set_seqno(hdr, seqno++);
                pktlen = msg_size(hdr);

                /* Tunnel link MTU is not large enough? This could be
                 * due to:
                 * 1) Link MTU has just changed or set differently;
                 * 2) Or FAILOVER on the top of a SYNCH message
                 *
                 * The 2nd case should not happen if peer supports
                 * TIPC_TUNNEL_ENHANCED
                 */
                if (pktlen > tnl->mtu - INT_H_SIZE) {
                        if (mtyp == FAILOVER_MSG &&
                            (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
                                rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
                                                       &frags);
                                if (rc) {
                                        pr_warn("%sunable to frag msg: rc %d\n",
                                                link_co_err, rc);
                                        return;
                                }
                                pktcnt += skb_queue_len(&frags) - 1;
                                pktcnt_need_update = true;
                                skb_queue_splice_tail_init(&frags, &tnlq);
                                continue;
                        }
                        /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
                         * => Just warn it and return!
                         */
                        pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
                                            link_co_err, msg_user(hdr),
                                            msg_type(hdr), msg_size(hdr));
                        return;
                }

                msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
                tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
                if (!tnlskb) {
                        pr_warn("%sunable to send packet\n", link_co_err);
                        return;
                }
                skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
                __skb_queue_tail(&tnlq, tnlskb);
        }
        if (queue != &l->backlogq) {
                queue = &l->backlogq;
                goto tnl;
        }

        if (pktcnt_need_update)
                skb_queue_walk(&tnlq, skb) {
                        hdr = buf_msg(skb);
                        msg_set_msgcnt(hdr, pktcnt);
                }

        tipc_link_xmit(tnl, &tnlq, xmitq);

        if (mtyp == FAILOVER_MSG) {
                tnl->drop_point = l->rcv_nxt;
                tnl->failover_reasm_skb = l->reasm_buf;
                l->reasm_buf = NULL;

                /* Failover the link's deferdq */
                if (unlikely(!skb_queue_empty(fdefq))) {
                        pr_warn("Link failover deferdq not empty: %d!\n",
                                skb_queue_len(fdefq));
                        __skb_queue_purge(fdefq);
                }
                skb_queue_splice_init(&l->deferdq, fdefq);
        }
}

/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * when the link failover is initiated by the peer via a FAILOVER_MSG
 * (i.e. the parallel links are not usable); this function is used to
 * prepare the failover link endpoint for receiving the tunneled packets
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue for messages to be sent
 */
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
                                struct sk_buff_head *xmitq)
{
        struct sk_buff_head *fdefq = &tnl->failover_deferdq;

        tipc_link_create_dummy_tnl_msg(tnl, xmitq);

        /* This failover link endpoint was never established before,
         * so it has not received anything from peer.
         * Otherwise, it must be a normal failover situation or the
         * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
         * would have to start over from scratch instead.
         */
        tnl->drop_point = 1;
        tnl->failover_reasm_skb = NULL;

        /* Initiate the link's failover deferdq */
        if (unlikely(!skb_queue_empty(fdefq))) {
                pr_warn("Link failover deferdq not empty: %d!\n",
                        skb_queue_len(fdefq));
                __skb_queue_purge(fdefq);
        }
}

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
        u16 curr_session = l->peer_session;
        u16 session = msg_session(hdr);
        int mtyp = msg_type(hdr);

        if (msg_user(hdr) != LINK_PROTOCOL)
                return true;

        switch (mtyp) {
        case RESET_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only RESET with new session number */
                return more(session, curr_session);
        case ACTIVATE_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only ACTIVATE with new or current session number */
                return !less(session, curr_session);
        case STATE_MSG:
                /* Accept only STATE with current session number */
                if (!l->in_session)
                        return false;
                if (session != curr_session)
                        return false;
                /* Extra sanity check */
                if (!link_is_up(l) && msg_ack(hdr))
                        return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
                return !less(msg_seqno(hdr), l->rcv_nxt_state);
        default:
                return false;
        }
}
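
/* Session validation rules in brief:
 *
 *   RESET     accepted when out of session, else only with a newer session
 *   ACTIVATE  accepted when out of session, else with current/newer session
 *   STATE     accepted only within the current session, and, if the peer
 *             supports TIPC_LINK_PROTO_SEQNO, only with a non-stale seqno
 */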

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct tipc_gap_ack_blks *ga = NULL;
        bool reply = msg_probe(hdr), retransmitted = false;
        u32 dlen = msg_data_sz(hdr), glen = 0;
        u16 peers_snd_nxt = msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
        u16 gap = msg_seq_gap(hdr);
        u16 ack = msg_ack(hdr);
        u16 rcv_nxt = l->rcv_nxt;
        u16 rcvgap = 0;
        int mtyp = msg_type(hdr);
        int rc = 0, released;
        char *if_name;
        void *data;

        trace_tipc_proto_rcv(skb, false, l->name);

        if (dlen > U16_MAX)
                goto exit;

        if (tipc_link_is_blocked(l) || !xmitq)
                goto exit;

        if (tipc_own_addr(l->net) > msg_prevnode(hdr))
                l->net_plane = msg_net_plane(hdr);

        skb_linearize(skb);
        hdr = buf_msg(skb);
        data = msg_data(hdr);

        if (!tipc_link_validate_msg(l, hdr)) {
                trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
                trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
                goto exit;
        }

        switch (mtyp) {
        case RESET_MSG:
        case ACTIVATE_MSG:
                /* Complete own link name with peer's interface name */
                if_name = strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
                        break;
                if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
                        break;
                strncpy(if_name, data, TIPC_MAX_IF_NAME);

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own priority if peer's priority is higher */
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;

                /* If peer is going down we want full re-establish cycle */
                if (msg_peer_stopping(hdr)) {
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                        break;
                }

                /* If this endpoint was re-created while peer was ESTABLISHING
                 * it doesn't know current session number. Force re-synch.
                 */
                if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
                    l->session != msg_dest_session(hdr)) {
                        if (less(l->session, msg_dest_session(hdr)))
                                l->session = msg_dest_session(hdr) + 1;
                        break;
                }

                /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
                if (mtyp == RESET_MSG || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

                /* ACTIVATE_MSG takes up link if it was already locally reset */
                if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
                        rc = TIPC_LINK_UP_EVT;

                l->peer_session = msg_session(hdr);
                l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
                if (l->mtu > msg_max_pkt(hdr))
                        l->mtu = msg_max_pkt(hdr);
                break;

        case STATE_MSG:
                /* Validate Gap ACK blocks, drop if invalid */
                glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
                if (glen > dlen)
                        break;

                l->rcv_nxt_state = msg_seqno(hdr) + 1;

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own prio if peer indicates a different value */
                if ((peers_prio != l->priority) &&
                    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
                        l->priority = peers_prio;
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }

                l->silent_intv_cnt = 0;
                l->stats.recv_states++;
                if (msg_probe(hdr))
                        l->stats.recv_probes++;

                if (!link_is_up(l)) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        break;
                }

                tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
                             &l->mon_state, l->bearer_id);

                /* Send NACK if peer has sent pkts we haven't received yet */
                if ((reply || msg_is_keepalive(hdr)) &&
                    more(peers_snd_nxt, rcv_nxt) &&
                    !tipc_link_is_synching(l) &&
                    skb_queue_empty(&l->deferdq))
                        rcvgap = peers_snd_nxt - l->rcv_nxt;
                if (rcvgap || reply)
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
                                                  rcvgap, 0, 0, xmitq);

                released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
                                                     &retransmitted, &rc);
                if (gap)
                        l->stats.recv_nacks++;
                if (released || retransmitted)
                        tipc_link_update_cwin(l, released, retransmitted);
                if (released)
                        tipc_link_advance_backlog(l, xmitq);
                if (unlikely(!skb_queue_empty(&l->wakeupq)))
                        link_prepare_wakeup(l);
        }
exit:
        kfree_skb(skb);
        return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
                                         u16 peers_snd_nxt,
                                         struct sk_buff_head *xmitq)
{
        struct sk_buff *skb;
        struct tipc_msg *hdr;
        struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
        u16 ack = l->rcv_nxt - 1;
        u16 gap_to = peers_snd_nxt - 1;

        skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
                              0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return false;
        hdr = buf_msg(skb);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_bcast_ack(hdr, ack);
        msg_set_bcgap_after(hdr, ack);
        if (dfrd_skb)
                gap_to = buf_seqno(dfrd_skb) - 1;
        msg_set_bcgap_to(hdr, gap_to);
        msg_set_non_seq(hdr, bcast);
        __skb_queue_tail(xmitq, skb);
        return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);
        if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
                return;
        msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
        tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}
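
/* For legacy peers the nack_state field above cycles through the three
 * BC_NACK_SND_* values, so that at most every other synch reception can emit
 * a NACK and gaps smaller than TIPC_MIN_LINK_WIN first get a chance to close
 * by themselves.
 */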

int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}
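
/* Retransmissions requested by the ack/gap report go on retrq, while traffic
 * newly released from the backlog goes on xmitq; callers may pass the same
 * queue for both, as the compatibility NACK path below does.
 */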

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is kept for backwards compatibility; peers with the
 * TIPC_BCAST_STATE_NACK capability report gaps via STATE messages instead.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
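
/* Illustration (assuming the default minimum window of 50): the backlog
 * limits scale with importance as 100/200/300/400 packets, while SYSTEM
 * importance is instead sized so that one full bulk distribution of name
 * table publications always fits.
 */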

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}
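
/* A usage sketch (illustrative only; 'attrs' stands for the caller's already
 * parsed link-level attributes):
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	u32 tol;
 *
 *	if (tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props))
 *		return -EINVAL;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 */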

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}

/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask deciding which link queues to dump:
 *           - TIPC_DUMP_NONE: don't dump link queues
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferd queue
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: buffer where the formatted dump data is written
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0); /* placeholder column */
	i += scnprintf(buf + i, sz - i, " %u", 0); /* placeholder column */
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}
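
/* A usage sketch (illustrative only): dump link state plus all queues into a
 * buffer sized for the full output:
 *
 *	char buf[LINK_LMAX];
 *
 *	tipc_link_dump(l, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */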