0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037 #include "core.h"
0038 #include "link.h"
0039 #include "node.h"
0040 #include "name_distr.h"
0041 #include "socket.h"
0042 #include "bcast.h"
0043 #include "monitor.h"
0044 #include "discover.h"
0045 #include "netlink.h"
0046 #include "trace.h"
0047 #include "crypto.h"
0048
/* Signature value that cannot be a real 16-bit node signature */
#define INVALID_NODE_SIG 0x10000
/* Delay (ms) before a down, inactive node is garbage-collected */
#define NODE_CLEANUP_AFTER 300000
0051
0052
0053
0054
0055
0056
/* Deferred-action flags: set under the node write lock, acted upon after
 * the lock is released (see tipc_node_write_unlock()).
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};
0063
/**
 * struct tipc_link_entry - per-bearer link slot of a node
 * @link: link instance on this bearer, or NULL if none exists
 * @lock: protects link execution (held around tipc_link_timeout(), see
 *        tipc_node_timeout())
 * @mtu: cached maximum message size for this link; refreshed at link-up
 *       from tipc_link_mss()
 * @inputq: queue of messages to be delivered to local sockets
 * @maddr: media address of the peer on this bearer
 */
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock;
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};
0071
/**
 * struct tipc_bclink_entry - broadcast receive state kept per peer node
 * @link: broadcast receive link towards the peer (created by
 *        tipc_link_bc_create())
 * @inputq1: first-stage delivery queue for broadcast messages
 * @arrvq: arrival queue (staging between inputq1 and inputq2 — handled
 *         in bcast/socket code, not visible in this file)
 * @inputq2: second-stage delivery queue
 * @namedq: queue for name-table (binding) distribution messages
 * @named_rcv_nxt: next expected sequence number on @namedq — assumption
 *                 from the name; maintained in name_distr.c (confirm)
 * @named_open: presumably gates named-message delivery; maintained
 *              outside this chunk (confirm against name_distr.c)
 */
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
/**
 * struct tipc_node - TIPC node structure
 * @addr: 32-bit network address of the node
 * @kref: reference counter (released via tipc_node_kref_release())
 * @lock: rwlock governing access to the structure
 * @net: the applicable net namespace
 * @hash: membership in the address hash table (tn->node_htable)
 * @active_links: bearer ids of the two active links; index into @links
 * @links: per-bearer link entries
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of deferred node actions (TIPC_NOTIFY_*)
 * @list: membership in the cluster's address-sorted node list
 * @state: connectivity state vs. peer node (SELF_*/NODE_* values)
 * @preliminary: true while the node address is still unconfirmed
 * @failover_sent: failover flag — set/consumed outside this chunk
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to the node
 * @working_links: number of working links (active and standby)
 * @capabilities: bitmap of the peer node's functional capabilities
 * @signature: node instance identifier announced by the peer
 * @link_id: id of the most recently changed link (see write_unlock path)
 * @peer_id: 128-bit identity of the peer
 * @peer_id_string: printable form of @peer_id
 * @publ_list: publications to notify on node down (tipc_publ_notify())
 * @conn_sks: connected sockets (struct tipc_sock_conn entries)
 * @keepalive_intv: keepalive probing interval in milliseconds
 * @timer: node keepalive timer (tipc_node_timeout())
 * @rcu: RCU head for deferred freeing (tipc_node_free())
 * @delete_at: jiffies after which a down node may be cleaned up
 * @peer_net: peer's net namespace when it lives on the same host
 * @peer_hash_mix: hash mix announced by the peer; used for peer-net
 *                 detection (tipc_node_assign_peer_net())
 * @crypto_rx: RX crypto handler (CONFIG_TIPC_CRYPTO only)
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};
0151
0152
0153
/* Node FSM states (see tipc_node_fsm_evt()); the hex values are
 * mnemonic only: 'd'=down, 'u'/'a'=up, 'c'=coming, '1'=leaving.
 */
enum {
	SELF_DOWN_PEER_DOWN = 0xdd,
	SELF_UP_PEER_UP = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING = 0xac,
	SELF_COMING_PEER_UP = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER = 0xf0,
	NODE_SYNCHING = 0xcc
};
0164
/* Events fed into the node FSM by link/failover/synch state changes */
enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT = 0xfee,
	NODE_SYNCH_BEGIN_EVT = 0xcbe,
	NODE_SYNCH_END_EVT = 0xcee
};
0175
0176 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
0177 struct sk_buff_head *xmitq,
0178 struct tipc_media_addr **maddr);
0179 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
0180 bool delete);
0181 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
0182 static void tipc_node_delete(struct tipc_node *node);
0183 static void tipc_node_timeout(struct timer_list *t);
0184 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
0185 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
0186 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
0187 static bool node_is_up(struct tipc_node *n);
0188 static void tipc_node_delete_from_list(struct tipc_node *node);
0189
/* One connected socket towards a peer node; kept on the node's
 * conn_sks list (see tipc_node_add_conn()/tipc_node_remove_conn()).
 */
struct tipc_sock_conn {
	u32 port;		/* local port of the connection */
	u32 peer_port;		/* peer's port */
	u32 peer_node;		/* peer's node address */
	struct list_head list;	/* membership in node->conn_sks */
};
0196
0197 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
0198 {
0199 int bearer_id = n->active_links[sel & 1];
0200
0201 if (unlikely(bearer_id == INVALID_BEARER_ID))
0202 return NULL;
0203
0204 return n->links[bearer_id].link;
0205 }
0206
/**
 * tipc_node_get_mtu - return the MTU to use towards a peer node
 * @net: network namespace
 * @addr: 32-bit address of the peer
 * @sel: link selector; only bit 0 is used to pick the active link slot
 * @connected: true when asking on behalf of a connected socket
 *
 * Return: the selected active link's MTU, or MAX_MSG_SIZE when the node
 * is unknown, has no active link, or is a same-host peer (peer_net set)
 * queried for a connection.
 */
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow the full MAX_MSG_SIZE for connection-oriented traffic when
	 * the peer lives in another namespace on the same host
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);	/* drop the reference taken by tipc_node_find() */
	return mtu;
}
0231
/**
 * tipc_node_get_id - copy the 128-bit identity of node @addr into @id
 * @net: network namespace
 * @addr: 32-bit node address to look up
 * @id: output buffer, at least TIPC_NODEID_LEN bytes
 *
 * Return: true on success, false if @addr is unknown.
 *
 * NOTE(review): when no own id is configured yet, this returns true
 * WITHOUT writing @id — callers must tolerate an untouched buffer in
 * that case; confirm against callers.
 */
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	/* Own address: answer from the local identity, no lookup needed */
	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}
0252
0253 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
0254 {
0255 struct tipc_node *n;
0256 u16 caps;
0257
0258 n = tipc_node_find(net, addr);
0259 if (unlikely(!n))
0260 return TIPC_NODE_CAPABILITIES;
0261 caps = n->capabilities;
0262 tipc_node_put(n);
0263 return caps;
0264 }
0265
0266 u32 tipc_node_get_addr(struct tipc_node *node)
0267 {
0268 return (node) ? node->addr : 0;
0269 }
0270
/* tipc_node_get_id_str - return the node's printable 128-bit id string.
 * Note: unlike tipc_node_get_addr(), @node must not be NULL here.
 */
char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}
0275
0276 #ifdef CONFIG_TIPC_CRYPTO
0277
0278
0279
0280
0281
0282 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
0283 {
0284 return (__n) ? __n->crypto_rx : NULL;
0285 }
0286
/* tipc_node_crypto_rx_by_list - map a node-list position back to the
 * owning node's RX crypto handle (@pos must be a node->list entry).
 */
struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}
0291
/* tipc_node_crypto_rx_by_addr - look up node @addr and return its RX
 * crypto handle, or NULL if the node is unknown.
 *
 * NOTE(review): the node reference taken by tipc_node_find() is NOT
 * dropped here — the caller appears to own it and must eventually call
 * tipc_node_put(); confirm against the crypto.c call sites.
 */
struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
0299 #endif
0300
/* tipc_node_free - RCU callback performing the final free of a node;
 * also stops the RX crypto instance that lives inside the node.
 */
static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}
0310
/* tipc_node_kref_release - called when the last node reference is
 * dropped: free the broadcast receive link immediately and defer the
 * node itself to RCU (readers may still traverse the hash/list).
 */
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}
0318
/* tipc_node_put - drop one node reference; frees the node when it was
 * the last one (see tipc_node_kref_release()).
 */
void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}
0323
/* tipc_node_get - take an additional reference on @node */
void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}
0328
0329
0330
0331
/* tipc_node_find - locate a confirmed node by 32-bit address.
 *
 * Preliminary nodes (unconfirmed address) are skipped. On success a
 * reference is taken and the caller must release it with
 * tipc_node_put(). kref_get_unless_zero() guards against a node that
 * is concurrently being torn down; in that race NULL is returned.
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
0349
0350
0351
0352
0353
/* tipc_node_find_by_id - locate a node by its 128-bit identity.
 *
 * Scans the full node list under RCU, comparing ids under the per-node
 * read lock. Unlike tipc_node_find() this does NOT skip preliminary
 * nodes. On success a reference is taken (caller must tipc_node_put()).
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}
0373
/* Take the node read lock (BH-safe); __acquires() is for sparse only */
static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}
0379
/* Release the node read lock */
static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}
0385
/* Take the node write lock (BH-safe) */
static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}
0391
/* Release the node write lock WITHOUT processing action_flags; use only
 * when no deferred notifications can have been raised (cf. the full
 * tipc_node_write_unlock() below).
 */
static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}
0397
/* tipc_node_write_unlock - release the node write lock and execute any
 * deferred actions recorded in n->action_flags.
 *
 * The notification targets (name table, monitor, binding distribution)
 * take their own locks, so they must be called only after the node lock
 * is dropped; the needed state is snapshotted into locals first.
 */
static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	/* Common case: nothing to notify */
	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	/* Snapshot notification parameters while still locked */
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	/* NOTE(review): n->capabilities is read below after the lock is
	 * dropped — presumably benign, but confirm.
	 */
	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}
0441
/* tipc_node_assign_peer_net - detect whether the peer actually lives in
 * another local net namespace (same host) and, if so, bind n->peer_net.
 *
 * A namespace qualifies when it has the same TIPC net id, the same
 * 128-bit node id as the peer, and a matching announced hash mix.
 */
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	/* Already bound: nothing to do */
	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Skip namespaces in other TIPC networks */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}
0469
0470 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
0471 u16 capabilities, u32 hash_mixes,
0472 bool preliminary)
0473 {
0474 struct tipc_net *tn = net_generic(net, tipc_net_id);
0475 struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
0476 struct tipc_node *n, *temp_node;
0477 unsigned long intv;
0478 int bearer_id;
0479 int i;
0480
0481 spin_lock_bh(&tn->node_list_lock);
0482 n = tipc_node_find(net, addr) ?:
0483 tipc_node_find_by_id(net, peer_id);
0484 if (n) {
0485 if (!n->preliminary)
0486 goto update;
0487 if (preliminary)
0488 goto exit;
0489
0490 tipc_node_write_lock(n);
0491 if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
0492 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
0493 n->capabilities, &n->bc_entry.inputq1,
0494 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
0495 pr_warn("Broadcast rcv link refresh failed, no memory\n");
0496 tipc_node_write_unlock_fast(n);
0497 tipc_node_put(n);
0498 n = NULL;
0499 goto exit;
0500 }
0501 n->preliminary = false;
0502 n->addr = addr;
0503 hlist_del_rcu(&n->hash);
0504 hlist_add_head_rcu(&n->hash,
0505 &tn->node_htable[tipc_hashfn(addr)]);
0506 list_del_rcu(&n->list);
0507 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
0508 if (n->addr < temp_node->addr)
0509 break;
0510 }
0511 list_add_tail_rcu(&n->list, &temp_node->list);
0512 tipc_node_write_unlock_fast(n);
0513
0514 update:
0515 if (n->peer_hash_mix ^ hash_mixes)
0516 tipc_node_assign_peer_net(n, hash_mixes);
0517 if (n->capabilities == capabilities)
0518 goto exit;
0519
0520 tipc_node_write_lock(n);
0521 n->capabilities = capabilities;
0522 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
0523 l = n->links[bearer_id].link;
0524 if (l)
0525 tipc_link_update_caps(l, capabilities);
0526 }
0527 tipc_node_write_unlock_fast(n);
0528
0529
0530 tn->capabilities = TIPC_NODE_CAPABILITIES;
0531 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
0532 tn->capabilities &= temp_node->capabilities;
0533 }
0534
0535 tipc_bcast_toggle_rcast(net,
0536 (tn->capabilities & TIPC_BCAST_RCAST));
0537
0538 goto exit;
0539 }
0540 n = kzalloc(sizeof(*n), GFP_ATOMIC);
0541 if (!n) {
0542 pr_warn("Node creation failed, no memory\n");
0543 goto exit;
0544 }
0545 tipc_nodeid2string(n->peer_id_string, peer_id);
0546 #ifdef CONFIG_TIPC_CRYPTO
0547 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
0548 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
0549 kfree(n);
0550 n = NULL;
0551 goto exit;
0552 }
0553 #endif
0554 n->addr = addr;
0555 n->preliminary = preliminary;
0556 memcpy(&n->peer_id, peer_id, 16);
0557 n->net = net;
0558 n->peer_net = NULL;
0559 n->peer_hash_mix = 0;
0560
0561 tipc_node_assign_peer_net(n, hash_mixes);
0562 n->capabilities = capabilities;
0563 kref_init(&n->kref);
0564 rwlock_init(&n->lock);
0565 INIT_HLIST_NODE(&n->hash);
0566 INIT_LIST_HEAD(&n->list);
0567 INIT_LIST_HEAD(&n->publ_list);
0568 INIT_LIST_HEAD(&n->conn_sks);
0569 skb_queue_head_init(&n->bc_entry.namedq);
0570 skb_queue_head_init(&n->bc_entry.inputq1);
0571 __skb_queue_head_init(&n->bc_entry.arrvq);
0572 skb_queue_head_init(&n->bc_entry.inputq2);
0573 for (i = 0; i < MAX_BEARERS; i++)
0574 spin_lock_init(&n->links[i].lock);
0575 n->state = SELF_DOWN_PEER_LEAVING;
0576 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
0577 n->signature = INVALID_NODE_SIG;
0578 n->active_links[0] = INVALID_BEARER_ID;
0579 n->active_links[1] = INVALID_BEARER_ID;
0580 if (!preliminary &&
0581 !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
0582 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
0583 n->capabilities, &n->bc_entry.inputq1,
0584 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
0585 pr_warn("Broadcast rcv link creation failed, no memory\n");
0586 kfree(n);
0587 n = NULL;
0588 goto exit;
0589 }
0590 tipc_node_get(n);
0591 timer_setup(&n->timer, tipc_node_timeout, 0);
0592
0593 n->keepalive_intv = 10000;
0594 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
0595 if (!mod_timer(&n->timer, intv))
0596 tipc_node_get(n);
0597 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
0598 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
0599 if (n->addr < temp_node->addr)
0600 break;
0601 }
0602 list_add_tail_rcu(&n->list, &temp_node->list);
0603
0604 tn->capabilities = TIPC_NODE_CAPABILITIES;
0605 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
0606 tn->capabilities &= temp_node->capabilities;
0607 }
0608 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
0609 trace_tipc_node_create(n, true, " ");
0610 exit:
0611 spin_unlock_bh(&tn->node_list_lock);
0612 return n;
0613 }
0614
/* tipc_node_calculate_timer - adapt the node keepalive interval to link
 * @l's tolerance: probe at tolerance/4, capped at 500 ms, never raising
 * the current interval; then derive the link abort limit from it.
 */
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* The interval may only shrink, so the most demanding link wins */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Number of missed probes after which the link is aborted */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
0627
/* tipc_node_delete_from_list - unlink a node from the global list and
 * hash and drop the list's reference; crypto keys are flushed first.
 */
static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);	/* list reference */
}
0637
/* tipc_node_delete - fully tear down a node: unlink it, stop its timer
 * and drop the timer's reference. Called with tn->node_list_lock held
 * (see tipc_node_stop()).
 */
static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);	/* timer reference */
}
0646
0647 void tipc_node_stop(struct net *net)
0648 {
0649 struct tipc_net *tn = tipc_net(net);
0650 struct tipc_node *node, *t_node;
0651
0652 spin_lock_bh(&tn->node_list_lock);
0653 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
0654 tipc_node_delete(node);
0655 spin_unlock_bh(&tn->node_list_lock);
0656 }
0657
0658 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
0659 {
0660 struct tipc_node *n;
0661
0662 if (in_own_node(net, addr))
0663 return;
0664
0665 n = tipc_node_find(net, addr);
0666 if (!n) {
0667 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
0668 return;
0669 }
0670 tipc_node_write_lock(n);
0671 list_add_tail(subscr, &n->publ_list);
0672 tipc_node_write_unlock_fast(n);
0673 tipc_node_put(n);
0674 }
0675
0676 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
0677 {
0678 struct tipc_node *n;
0679
0680 if (in_own_node(net, addr))
0681 return;
0682
0683 n = tipc_node_find(net, addr);
0684 if (!n) {
0685 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
0686 return;
0687 }
0688 tipc_node_write_lock(n);
0689 list_del_init(subscr);
0690 tipc_node_write_unlock_fast(n);
0691 tipc_node_put(n);
0692 }
0693
/**
 * tipc_node_add_conn - record a socket connection towards node @dnode
 * @net: network namespace
 * @dnode: peer node address
 * @port: local port of the connection
 * @peer_port: peer's port
 *
 * Connections to the own node are not tracked. The entry is removed by
 * tipc_node_remove_conn() or when the node loses contact.
 *
 * Return: 0 on success, -EHOSTUNREACH if the node is unknown.
 * NOTE(review): allocation failure is also reported as -EHOSTUNREACH,
 * not -ENOMEM — presumably deliberate (callers treat both the same);
 * confirm before changing.
 */
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}
0724
0725 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
0726 {
0727 struct tipc_node *node;
0728 struct tipc_sock_conn *conn, *safe;
0729
0730 if (in_own_node(net, dnode))
0731 return;
0732
0733 node = tipc_node_find(net, dnode);
0734 if (!node)
0735 return;
0736
0737 tipc_node_write_lock(node);
0738 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
0739 if (port != conn->port)
0740 continue;
0741 list_del(&conn->list);
0742 kfree(conn);
0743 }
0744 tipc_node_write_unlock(node);
0745 tipc_node_put(node);
0746 }
0747
0748 static void tipc_node_clear_links(struct tipc_node *node)
0749 {
0750 int i;
0751
0752 for (i = 0; i < MAX_BEARERS; i++) {
0753 struct tipc_link_entry *le = &node->links[i];
0754
0755 if (le->link) {
0756 kfree(le->link);
0757 le->link = NULL;
0758 node->link_cnt--;
0759 }
0760 }
0761 }
0762
0763
0764
0765
/* tipc_node_cleanup - garbage-collect a node that has been down past
 * its delete_at deadline. Runs from timer context, so the global list
 * lock is only trylock'ed — presumably to avoid blocking/deadlocking
 * against tipc_node_stop(); the next timer tick simply retries.
 *
 * Returns true if the node was removed from the global list.
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* Cannot wait for the list lock in timer context */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Recalculate cluster capabilities now that a node is gone */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}
0800
0801
0802
/* tipc_node_timeout - node keepalive timer handler.
 *
 * Garbage-collects a long-down node, runs crypto housekeeping, drives
 * each link's timeout under its per-link lock, transmits any resulting
 * probes, and re-arms the timer with the (possibly shrunk) interval.
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Node deleted: drop the timer's reference and stop */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset to the slow base interval; each link below may shrink it
	 * again via tipc_node_calculate_timer().
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* link tolerance may have changed meanwhile */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		/* xmitq is drained here, so it is empty for the next pass */
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
0849
0850
0851
0852
0853
0854
0855
0856
0857
/* __tipc_node_link_up - handle a link on @bearer_id becoming operational.
 *
 * Updates the active-link slots: slot 0 holds the highest-priority
 * active link, slot 1 a possible load-sharing peer of equal priority.
 * The first link up also brings the node up and registers it as a
 * broadcast destination.
 *
 * Caller must hold the node write lock; outgoing protocol messages are
 * appended to @xmitq for transmission after the lock is released.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Refresh the cached per-bearer MTU from the link */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link up: node comes up, sync broadcast state with peer */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link: decide active/standby roles by priority */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		/* Equal priority: share load across both slots */
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with the first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
0918
0919
0920
0921
0922
0923
0924
0925
0926
/* tipc_node_link_up - locked wrapper around __tipc_node_link_up() that
 * also transmits the generated protocol messages on the bearer.
 * The full tipc_node_write_unlock() delivers any notification flags
 * raised while bringing the link up.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
/* tipc_node_link_failover - start failover of link @l onto tunnel link
 * @tnl, used to resolve a "half-failover" where only one side detected
 * the failure. Requires @tnl up and @l (if it exists) in reset state;
 * any ongoing synch is terminated first. Messages generated for the
 * failover are appended to @xmitq.
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Tunnel link must be up before failover traffic can flow */
	if (!tipc_link_is_up(tnl))
		return;

	/* A live link must not be failed over */
	if (l && !tipc_link_is_reset(l))
		return;

	/* Abort any synchronization in progress */
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}
0982
0983
0984
0985
0986
0987
0988
0989
/* __tipc_node_link_down - handle loss of the link on *@bearer_id.
 *
 * Recomputes the active-link slots from the remaining up links. If no
 * link is left the node goes down and local contact is cleaned up;
 * otherwise a failover onto the new active link is initiated, and
 * *@bearer_id / *@maddr are redirected to that tunnel link so the
 * caller transmits the failover messages on the surviving bearer.
 *
 * Caller must hold the node write lock.
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link(s) from the remaining up links, if any:
	 * slot 0 gets the highest priority, slot 1 a same-priority twin.
	 */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	/* Last link gone: node goes down, notify local users */
	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover onto it */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
1063
/* tipc_node_link_down - take down (and optionally delete) the link on
 * @bearer_id, then transmit any generated messages and deliver pending
 * socket input. A link still in the establishing phase is just reset.
 */
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	/* __tipc_node_link_down() may redirect bearer_id to the failover
	 * bearer; keep the original for tipc_mon_remove_peer() below.
	 */
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}
1098
1099 static bool node_is_up(struct tipc_node *n)
1100 {
1101 return n->active_links[0] != INVALID_BEARER_ID;
1102 }
1103
1104 bool tipc_node_is_up(struct net *net, u32 addr)
1105 {
1106 struct tipc_node *n;
1107 bool retval = false;
1108
1109 if (in_own_node(net, addr))
1110 return true;
1111
1112 n = tipc_node_find(net, addr);
1113 if (!n)
1114 return false;
1115 retval = node_is_up(n);
1116 tipc_node_put(n);
1117 return retval;
1118 }
1119
/* tipc_node_suggest_addr - derive an unused node address from @addr by
 * mixing in the namespace random value and linearly probing past any
 * addresses already in use.
 */
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	/* Probe upwards until a free address is found */
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}
1131
1132
1133
1134
/* tipc_node_try_addr - check if the (id, addr) pair can be used by the
 * peer identified by @id during address trial.
 *
 * Returns 0 if @addr is acceptable (taken by this very peer, or free
 * and not colliding with our own trial), otherwise a suggested
 * alternative address.
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest a new address if the requested one is occupied by
	 * a different node
	 */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;	/* same peer: address is fine */
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* The peer may already be known under a confirmed address */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Collision with our own trial address: suggest another one */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
1169
/**
 * tipc_node_check_dest - process a discovery message from a peer
 * @net: network namespace
 * @addr: announced node address of the peer
 * @peer_id: announced 128-bit identity of the peer
 * @b: bearer the message arrived on
 * @capabilities: announced capability bitmap
 * @signature: announced node instance signature
 * @hash_mixes: announced hash mix (peer-net detection)
 * @maddr: media address the peer can be reached on
 * @respond: out - caller should send a discovery response
 * @dupl_addr: out - address conflict detected, caller should complain
 *
 * Creates/refreshes the node object, decides from the combination of
 * signature match, media-address match and link state how to treat the
 * message, and creates the per-bearer link if needed and accepted.
 */
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media
	 * address against what we currently have recorded.
	 */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is as expected; nothing to reset */
		reset = false;
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond; the link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer appears to have changed i/f address without
		 * rebooting (or a duplicate is impersonating it). Ignore
		 * until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer changed i/f address while the link was down;
		 * accept the new media address and respond.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Signature changed under a live link — presumably the
		 * peer updated its signature (e.g. new instance) while we
		 * still see the same address; just record the new one.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* Peer rebooted with the same i/f address; record the new
		 * signature and respond.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Different peer claiming an in-use address over a live
		 * link: address conflict.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with a new i/f address and signature;
		 * accept both and respond.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create a new link if one does not exist on this bearer */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		/* First link: (re)start the keepalive timer */
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	/* A mismatching but live link must be taken down first */
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}
1310
1311 void tipc_node_delete_links(struct net *net, int bearer_id)
1312 {
1313 struct tipc_net *tn = net_generic(net, tipc_net_id);
1314 struct tipc_node *n;
1315
1316 rcu_read_lock();
1317 list_for_each_entry_rcu(n, &tn->node_list, list) {
1318 tipc_node_link_down(n, bearer_id, true);
1319 }
1320 rcu_read_unlock();
1321 }
1322
1323 static void tipc_node_reset_links(struct tipc_node *n)
1324 {
1325 int i;
1326
1327 pr_warn("Resetting all links to %x\n", n->addr);
1328
1329 trace_tipc_node_reset_links(n, true, " ");
1330 for (i = 0; i < MAX_BEARERS; i++) {
1331 tipc_node_link_down(n, i, false);
1332 }
1333 }
1334
1335
1336
1337
/* tipc_node_fsm_evt - node finite state machine
 *
 * Advances n->state according to event @evt. The outer switch selects on
 * the current state, the inner switch on the event; events listed without
 * a state assignment are legal no-ops, everything falling through to
 * 'default' is illegal in that state and is logged without changing state.
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	/* Illegal event in this state: log and trace, but keep n->state */
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
1513
/* node_lost_contact - handle loss of all contact with a peer node.
 * Cleans up broadcast state, aborts link failovers, and aborts all
 * socket connections towards the peer. Rejection messages for those
 * connections are queued on @inputq for later delivery to local sockets.
 * Called with the node write lock held.
 */
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	/* Schedule the node for lazy deletion by the node timer */
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node and reset cluster-netns state */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;

	/* Notify sockets connected to node: queue one abort msg per conn */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1567 char *linkname, size_t len)
1568 {
1569 struct tipc_link *link;
1570 int err = -EINVAL;
1571 struct tipc_node *node = tipc_node_find(net, addr);
1572
1573 if (!node)
1574 return err;
1575
1576 if (bearer_id >= MAX_BEARERS)
1577 goto exit;
1578
1579 tipc_node_read_lock(node);
1580 link = node->links[bearer_id].link;
1581 if (link) {
1582 strncpy(linkname, tipc_link_name(link), len);
1583 err = 0;
1584 }
1585 tipc_node_read_unlock(node);
1586 exit:
1587 tipc_node_put(node);
1588 return err;
1589 }
1590
1591
/* __tipc_nl_add_node - append one TIPC_NLA_NODE descriptor (address and,
 * if applicable, an 'up' flag) to the netlink dump message in @msg.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room, in which
 * case all partially written attributes are cancelled.
 */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
1624
/* tipc_lxc_xmit - deliver messages directly into a peer net namespace
 * (container shortcut), bypassing the bearer. Messages that cannot be
 * delivered this way are left on @list for the caller to transmit over
 * the regular link. NOTE(review): presumably only used when the peer
 * node lives on the same host - confirm against callers.
 */
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			/* Re-init the queue lock before handing the queue
			 * over to the peer namespace's socket layer
			 */
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		/* Only deliver if all fragments could be reassembled */
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		/* Link-level protocols are not short-circuited */
		return;
	default:
		return;
	}
}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
/**
 * tipc_node_xmit - transmit a queue of buffers to the node identified
 * by @dnode
 * @net: the applicable net namespace
 * @list: chain of buffers containing message (consumed on all paths)
 * @dnode: address of destination node
 * @selector: used to select an active link (low bit indexes active_links[])
 *
 * Consumes the buffer chain.
 * Return: 0 on success, -errno (-EHOSTUNREACH on unreachable node,
 * -ENOBUFS on congestion) otherwise.
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	/* Own-node destination: deliver straight to local sockets */
	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* Peer is in another netns on this host: try the shortcut */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	/* Pass the chain to the link layer under the per-link lock */
	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}
1744
1745
1746
1747
1748
1749
1750
1751 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1752 u32 selector)
1753 {
1754 struct sk_buff_head head;
1755
1756 __skb_queue_head_init(&head);
1757 __skb_queue_tail(&head, skb);
1758 tipc_node_xmit(net, &head, dnode, selector);
1759 return 0;
1760 }
1761
1762
1763
1764
1765 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1766 {
1767 struct sk_buff *skb;
1768 u32 selector, dnode;
1769
1770 while ((skb = __skb_dequeue(xmitq))) {
1771 selector = msg_origport(buf_msg(skb));
1772 dnode = msg_destnode(buf_msg(skb));
1773 tipc_node_xmit_skb(net, skb, dnode, selector);
1774 }
1775 return 0;
1776 }
1777
/* tipc_node_broadcast - send @skb to all reachable peer nodes.
 * Uses the true broadcast link when possible; otherwise falls back to
 * replicast, i.e. one unicast copy per peer node. Consumes @skb.
 */
void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
{
	struct sk_buff_head xmitq;
	struct sk_buff *txskb;
	struct tipc_node *n;
	u16 dummy;
	u32 dst;

	/* Use broadcast if all nodes support it */
	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
		__skb_queue_head_init(&xmitq);
		__skb_queue_tail(&xmitq, skb);
		tipc_bcast_xmit(net, &xmitq, &dummy);
		return;
	}

	/* Otherwise use legacy replicast method */
	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();
	/* Original buffer was only a template; free it */
	kfree_skb(skb);
}
1811
/* tipc_node_mcast_rcv - move pending multicast buffers from inputq1 to
 * the arrival queue and deliver them to local sockets.
 */
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection; note the deliberate
	 * lock nesting order: inputq2 before inputq1
	 */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
1824
/* tipc_node_bc_sync_rcv - process broadcast link state carried in a
 * unicast link protocol message from the peer. May request a reset of
 * all links (TIPC_LINK_DOWN_EVT) or schedule a unicast STATE message
 * carrying broadcast acks (TIPC_LINK_SND_STATE).
 */
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}
1852
1853
1854
1855
1856
1857
1858
1859
1860
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet (consumed)
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from publishing node */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
1913
1914
1915
1916
1917
1918
1919
1920
1921
/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be xmited on
 * Return: true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	/* Fast path: normal traffic in a fully established node */
	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accesibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No syncing needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet (always consumed)
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure; use of the bearer's mutable fields is not guarded here.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	/* tipc_crypto_rcv() consumes/replaces skb; NULL means it was dropped
	 * or queued internally
	 */
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			tipc_node_put(n);
			goto discard;
		}
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit (fast path) */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving (slow path) */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
	tipc_node_put(n);
discard:
	/* kfree_skb(NULL) is a no-op, so consumed buffers are safe here */
	kfree_skb(skb);
}
2195
/* tipc_node_apply_property - propagate a changed bearer property
 * (tolerance or MTU) to the corresponding link of every known node.
 */
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);

			/* Update the cached MSS in the link entry as well */
			e->mtu = tipc_link_mss(e->link);
		}

		tipc_node_write_unlock(n);
		/* Flush any protocol messages generated by the update */
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}
2229
2230 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2231 {
2232 struct net *net = sock_net(skb->sk);
2233 struct tipc_net *tn = net_generic(net, tipc_net_id);
2234 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2235 struct tipc_node *peer, *temp_node;
2236 u8 node_id[NODE_ID_LEN];
2237 u64 *w0 = (u64 *)&node_id[0];
2238 u64 *w1 = (u64 *)&node_id[8];
2239 u32 addr;
2240 int err;
2241
2242
2243 if (!info->attrs[TIPC_NLA_NET])
2244 return -EINVAL;
2245
2246 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2247 info->attrs[TIPC_NLA_NET],
2248 tipc_nl_net_policy, info->extack);
2249 if (err)
2250 return err;
2251
2252
2253
2254
2255 if (attrs[TIPC_NLA_NET_ADDR]) {
2256 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2257 if (!addr)
2258 return -EINVAL;
2259 }
2260
2261 if (attrs[TIPC_NLA_NET_NODEID]) {
2262 if (!attrs[TIPC_NLA_NET_NODEID_W1])
2263 return -EINVAL;
2264 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
2265 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
2266 addr = hash128to32(node_id);
2267 }
2268
2269 if (in_own_node(net, addr))
2270 return -ENOTSUPP;
2271
2272 spin_lock_bh(&tn->node_list_lock);
2273 peer = tipc_node_find(net, addr);
2274 if (!peer) {
2275 spin_unlock_bh(&tn->node_list_lock);
2276 return -ENXIO;
2277 }
2278
2279 tipc_node_write_lock(peer);
2280 if (peer->state != SELF_DOWN_PEER_DOWN &&
2281 peer->state != SELF_DOWN_PEER_LEAVING) {
2282 tipc_node_write_unlock(peer);
2283 err = -EBUSY;
2284 goto err_out;
2285 }
2286
2287 tipc_node_clear_links(peer);
2288 tipc_node_write_unlock(peer);
2289 tipc_node_delete(peer);
2290
2291
2292 tn->capabilities = TIPC_NODE_CAPABILITIES;
2293 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2294 tn->capabilities &= temp_node->capabilities;
2295 }
2296 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2297 err = 0;
2298 err_out:
2299 tipc_node_put(peer);
2300 spin_unlock_bh(&tn->node_list_lock);
2301
2302 return err;
2303 }
2304
/* tipc_nl_node_dump - netlink dump callback listing all known nodes.
 * Resume state is kept in cb->args: args[0] = done flag, args[1] = address
 * of the last node that did not fit in the previous skb.
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		/* Skip nodes that are only provisionally discovered */
		if (node->preliminary)
			continue;
		if (last_addr) {
			/* Fast-forward to the node after the resume point */
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			/* skb full: remember where to resume next round */
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
2368
2369
2370
2371
2372
2373
2374
2375
2376 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2377 const char *link_name,
2378 unsigned int *bearer_id)
2379 {
2380 struct tipc_net *tn = net_generic(net, tipc_net_id);
2381 struct tipc_link *l;
2382 struct tipc_node *n;
2383 struct tipc_node *found_node = NULL;
2384 int i;
2385
2386 *bearer_id = 0;
2387 rcu_read_lock();
2388 list_for_each_entry_rcu(n, &tn->node_list, list) {
2389 tipc_node_read_lock(n);
2390 for (i = 0; i < MAX_BEARERS; i++) {
2391 l = n->links[i].link;
2392 if (l && !strcmp(tipc_link_name(l), link_name)) {
2393 *bearer_id = i;
2394 found_node = n;
2395 break;
2396 }
2397 }
2398 tipc_node_read_unlock(n);
2399 if (found_node)
2400 break;
2401 }
2402 rcu_read_unlock();
2403
2404 return found_node;
2405 }
2406
/* tipc_nl_node_set_link - netlink handler updating link properties
 * (tolerance, priority, window) for a named link. Requests naming the
 * broadcast link are delegated to tipc_nl_bc_link_set().
 */
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			/* Only the max window is settable; min is preserved */
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	/* Send out any protocol messages generated by the property change */
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}
2487
/* tipc_nl_node_get_link - netlink handler returning the attributes of a
 * single named link (or the broadcast link) as a genl reply message.
 */
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		/* Broadcast link has no owning node; handled separately */
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
2552
/* tipc_nl_node_reset_link_stats - netlink handler resetting the statistics
 * of a named link: the broadcast send link, a peer's broadcast receive
 * link (name contains the bclink name), or a regular unicast link.
 */
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		/* Exact match: the broadcast send link */
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		/* Substring match: a per-peer broadcast receive link */
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	/* Per-link spinlock guards the stats against concurrent receivers */
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}
2619
2620
/* __tipc_nl_add_node_links - append all of @node's links (and optionally
 * its broadcast link) to a dump message. @prev_link is the resume cursor:
 * it is updated before each attempt so a full skb can be resumed at the
 * failing link, and reset to 0 once the node is fully dumped.
 * Caller should hold node lock for the passed node.
 */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		/* Cursor past MAX_BEARERS marks "unicast links done" */
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;

	return 0;
}
2651
/* tipc_nl_node_dump_link - netlink dump handler listing all TIPC links.
 *
 * Resume state carried in cb->args across invocations:
 *  args[0] = address of the last fully dumped node (0 on first call)
 *  args[1] = bearer id to resume from within that node
 *  args[2] = non-zero once the whole walk has completed
 *  args[3] = whether broadcast receive links were requested
 */
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* First pass only: check whether the caller also wants the
		 * broadcast receive links (TIPC_NLA_LINK_BROADCAST attribute).
		 */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* The resume node vanished between invocations. We
			 * never set cb->seq / nl_dump_check_consistent(), so
			 * forcing prev_seq to 1 makes the consistency check
			 * fail in the netlink callback handler, which in turn
			 * gets the final NLMSG_DONE tagged NLM_F_DUMP_INTR.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		/* Only needed the lookup; drop the ref tipc_node_find() took */
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: emit the broadcast send link first */
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Save resume state for the next invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}
2742
2743 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2744 {
2745 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2746 struct net *net = sock_net(skb->sk);
2747 int err;
2748
2749 if (!info->attrs[TIPC_NLA_MON])
2750 return -EINVAL;
2751
2752 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2753 info->attrs[TIPC_NLA_MON],
2754 tipc_nl_monitor_policy,
2755 info->extack);
2756 if (err)
2757 return err;
2758
2759 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2760 u32 val;
2761
2762 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2763 err = tipc_nl_monitor_set_threshold(net, val);
2764 if (err)
2765 return err;
2766 }
2767
2768 return 0;
2769 }
2770
2771 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2772 {
2773 struct nlattr *attrs;
2774 void *hdr;
2775 u32 val;
2776
2777 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2778 0, TIPC_NL_MON_GET);
2779 if (!hdr)
2780 return -EMSGSIZE;
2781
2782 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2783 if (!attrs)
2784 goto msg_full;
2785
2786 val = tipc_nl_monitor_get_threshold(net);
2787
2788 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2789 goto attr_msg_full;
2790
2791 nla_nest_end(msg->skb, attrs);
2792 genlmsg_end(msg->skb, hdr);
2793
2794 return 0;
2795
2796 attr_msg_full:
2797 nla_nest_cancel(msg->skb, attrs);
2798 msg_full:
2799 genlmsg_cancel(msg->skb, hdr);
2800
2801 return -EMSGSIZE;
2802 }
2803
2804 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2805 {
2806 struct net *net = sock_net(skb->sk);
2807 struct tipc_nl_msg msg;
2808 int err;
2809
2810 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2811 if (!msg.skb)
2812 return -ENOMEM;
2813 msg.portid = info->snd_portid;
2814 msg.seq = info->snd_seq;
2815
2816 err = __tipc_nl_add_monitor_prop(net, &msg);
2817 if (err) {
2818 nlmsg_free(msg.skb);
2819 return err;
2820 }
2821
2822 return genlmsg_reply(msg.skb, info);
2823 }
2824
2825 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2826 {
2827 struct net *net = sock_net(skb->sk);
2828 u32 prev_bearer = cb->args[0];
2829 struct tipc_nl_msg msg;
2830 int bearer_id;
2831 int err;
2832
2833 if (prev_bearer == MAX_BEARERS)
2834 return 0;
2835
2836 msg.skb = skb;
2837 msg.portid = NETLINK_CB(cb->skb).portid;
2838 msg.seq = cb->nlh->nlmsg_seq;
2839
2840 rtnl_lock();
2841 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2842 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2843 if (err)
2844 break;
2845 }
2846 rtnl_unlock();
2847 cb->args[0] = bearer_id;
2848
2849 return skb->len;
2850 }
2851
/* tipc_nl_node_dump_monitor_peer - dump the monitor's view of all peers on
 * one bearer.
 *
 * Resume state in cb->args: args[0] done flag, args[1] address of the last
 * dumped peer (0 initially), args[2] bearer id. Note the request attributes
 * are (re)parsed whenever prev_node is still 0, even if done is already set,
 * preserving the original validation order.
 */
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		/* Nothing dumped yet: the bearer id must come from the
		 * TIPC_NLA_MON_REF attribute of the request.
		 */
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;	/* all peers emitted; finish on next call */

	rtnl_unlock();
	/* Save resume state for the next invocation */
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
2904
2905 #ifdef CONFIG_TIPC_CRYPTO
2906 static int tipc_nl_retrieve_key(struct nlattr **attrs,
2907 struct tipc_aead_key **pkey)
2908 {
2909 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2910 struct tipc_aead_key *key;
2911
2912 if (!attr)
2913 return -ENODATA;
2914
2915 if (nla_len(attr) < sizeof(*key))
2916 return -EINVAL;
2917 key = (struct tipc_aead_key *)nla_data(attr);
2918 if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
2919 nla_len(attr) < tipc_aead_key_size(key))
2920 return -EINVAL;
2921
2922 *pkey = key;
2923 return 0;
2924 }
2925
2926 static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2927 {
2928 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2929
2930 if (!attr)
2931 return -ENODATA;
2932
2933 if (nla_len(attr) < TIPC_NODEID_LEN)
2934 return -EINVAL;
2935
2936 *node_id = (u8 *)nla_data(attr);
2937 return 0;
2938 }
2939
2940 static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
2941 {
2942 struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
2943
2944 if (!attr)
2945 return -ENODATA;
2946
2947 *intv = nla_get_u32(attr);
2948 return 0;
2949 }
2950
/* __tipc_nl_node_set_key - attach a new crypto key and/or (re)schedule
 * rekeying; runs under RTNL (see tipc_nl_node_set_key()).
 *
 * The request may carry:
 *  - TIPC_NLA_NODE_REKEYING:   auto-rekeying interval;
 *  - TIPC_NLA_NODE_KEY:        the key itself (validated below);
 *  - TIPC_NLA_NODE_ID:         a node identity, selecting a per-node key
 *                              (attached to that peer's RX crypto when the
 *                              id is not our own) instead of a cluster key;
 *  - TIPC_NLA_NODE_KEY_MASTER: flag marking a cluster key as master key.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;	/* no interval given: leave timer alone */

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;		/* rekeying-only request, no key */
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		/* No node id: cluster key, possibly flagged as master key */
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		/* Per-node key. For a peer (id differs from our own) attach
		 * it to that peer's RX crypto, creating a preliminary node
		 * entry if the peer is not known yet.
		 */
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master key */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}
3031
/* tipc_nl_node_set_key - netlink entry point for setting a crypto key.
 *
 * Thin wrapper serializing __tipc_nl_node_set_key() under RTNL.
 */
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int rc;

	rtnl_lock();
	rc = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return rc;
}
3042
3043 static int __tipc_nl_node_flush_key(struct sk_buff *skb,
3044 struct genl_info *info)
3045 {
3046 struct net *net = sock_net(skb->sk);
3047 struct tipc_net *tn = tipc_net(net);
3048 struct tipc_node *n;
3049
3050 tipc_crypto_key_flush(tn->crypto_tx);
3051 rcu_read_lock();
3052 list_for_each_entry_rcu(n, &tn->node_list, list)
3053 tipc_crypto_key_flush(n->crypto_rx);
3054 rcu_read_unlock();
3055
3056 return 0;
3057 }
3058
/* tipc_nl_node_flush_key - netlink entry point for flushing all crypto keys.
 *
 * Thin wrapper serializing __tipc_nl_node_flush_key() under RTNL.
 */
int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int rc;

	rtnl_lock();
	rc = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return rc;
}
3069 #endif
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079 int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
3080 {
3081 int i = 0;
3082 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
3083
3084 if (!n) {
3085 i += scnprintf(buf, sz, "node data: (null)\n");
3086 return i;
3087 }
3088
3089 i += scnprintf(buf, sz, "node data: %x", n->addr);
3090 i += scnprintf(buf + i, sz - i, " %x", n->state);
3091 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
3092 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
3093 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
3094 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
3095 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
3096 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
3097 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
3098 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
3099 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
3100
3101 if (!more)
3102 return i;
3103
3104 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
3105 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
3106 i += scnprintf(buf + i, sz - i, " media: ");
3107 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
3108 i += scnprintf(buf + i, sz - i, "\n");
3109 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
3110 i += scnprintf(buf + i, sz - i, " inputq: ");
3111 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3112
3113 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3114 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3115 i += scnprintf(buf + i, sz - i, " media: ");
3116 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3117 i += scnprintf(buf + i, sz - i, "\n");
3118 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3119 i += scnprintf(buf + i, sz - i, " inputq: ");
3120 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3121
3122 i += scnprintf(buf + i, sz - i, "bclink:\n ");
3123 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3124
3125 return i;
3126 }
3127
/* tipc_node_pre_cleanup_net - detach references to a dying network namespace.
 *
 * For every other net namespace, find the node whose peer_net points at
 * @exit_net and clear its peer_net/peer_hash_mix under the node write lock,
 * so no stale reference to the exiting namespace remains.
 */
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		/* node_list_lock keeps the list stable while we scan it */
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			/* NOTE(review): break assumes at most one node per
			 * namespace can reference exit_net — confirm.
			 */
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}