/*
 * net/tipc/monitor.c
 *
 * Copyright (c) 2016, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/genetlink.h>
#include "core.h"
#include "addr.h"
#include "monitor.h"
#include "bearer.h"

#define MAX_MON_DOMAIN       64
#define MON_TIMEOUT          120000
#define MAX_PEER_DOWN_EVENTS 4

/* struct tipc_mon_domain: domain record to be transferred between peers
 * @len: actual size of domain record
 * @gen: current generation of sender's domain
 * @ack_gen: most recent generation of self's domain acked by peer
 * @member_cnt: number of domain member nodes described in this record
 * @up_map: bit map indicating which of the members the sender considers up
 * @members: identity of the domain members
 */
struct tipc_mon_domain {
    u16 len;
    u16 gen;
    u16 ack_gen;
    u16 member_cnt;
    u64 up_map;
    u32 members[MAX_MON_DOMAIN];
};

/* struct tipc_peer: state of a peer node and its domain
 * @addr: tipc node identity of peer
 * @domain: most recent domain record from peer
 * @hash: position in hashed lookup list
 * @list: position in linked list, in circular ascending order by 'addr'
 * @applied: number of reported domain members applied on this monitor list
 * @is_up: peer is up as seen from this node
 * @is_head: peer is assigned domain head as seen from this node
 * @is_local: peer is in local domain and should be continuously monitored
 * @down_cnt: number of other peers which have reported this peer lost
 */
struct tipc_peer {
    u32 addr;
    struct tipc_mon_domain *domain;
    struct hlist_node hash;
    struct list_head list;
    u8 applied;
    u8 down_cnt;
    bool is_up;
    bool is_head;
    bool is_local;
};

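/* struct tipc_monitor: per-bearer network monitor instance
 * @peers: hash table of all known peers on this bearer
 * @peer_cnt: number of peers, including the own node
 * @self: peer entry representing the own node
 * @lock: protects the monitor list and the peers' domain records
 * @cache: wire-format copy of the own domain record, ready for transmission
 * @list_gen: current generation of the monitor list
 * @dom_gen: current generation of the own domain record
 * @net: network namespace this monitor belongs to
 * @timer: periodically re-evaluates the own domain size
 * @timer_intv: interval of @timer, jittered per instance
 */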
struct tipc_monitor {
    struct hlist_head peers[NODE_HTABLE_SIZE];
    int peer_cnt;
    struct tipc_peer *self;
    rwlock_t lock;
    struct tipc_mon_domain cache;
    u16 list_gen;
    u16 dom_gen;
    struct net *net;
    struct timer_list timer;
    unsigned long timer_intv;
};

static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
{
    return tipc_net(net)->monitors[bearer_id];
}

const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);

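/* Byte-order helpers for the on-wire domain record. Note that, despite the
 * "le" in their names, they wrap htons()/htonl()/cpu_to_be64(), i.e. the
 * record is carried in network byte order (big endian) on the wire.
 */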
static inline u16 mon_cpu_to_le16(u16 val)
{
    return (__force __u16)htons(val);
}

static inline u32 mon_cpu_to_le32(u32 val)
{
    return (__force __u32)htonl(val);
}

static inline u64 mon_cpu_to_le64(u64 val)
{
    return (__force __u64)cpu_to_be64(val);
}

static inline u16 mon_le16_to_cpu(u16 val)
{
    return ntohs((__force __be16)val);
}

static inline u32 mon_le32_to_cpu(u32 val)
{
    return ntohl((__force __be32)val);
}

static inline u64 mon_le64_to_cpu(u64 val)
{
    return be64_to_cpu((__force __be64)val);
}

/* dom_rec_len(): actual length of domain record for transport
 */
static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
{
    return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
}

/* dom_size() : calculate size of own domain based on number of peers
 */
static int dom_size(int peers)
{
    int i = 0;

    while ((i * i) < peers)
        i++;
    return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}

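/* map_set() / map_get() : set or read bit 'i' of an 'up_map' bit map
 */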
static void map_set(u64 *up_map, int i, unsigned int v)
{
    *up_map &= ~(1ULL << i);
    *up_map |= ((u64)v << i);
}

static int map_get(u64 up_map, int i)
{
    return (up_map & (1ULL << i)) >> i;
}

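/* peer_prev() / peer_nxt() / peer_head() : navigate the circular peer list;
 * peer_head() walks backwards until it finds the domain head the given peer
 * belongs to
 */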
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
{
    return list_last_entry(&peer->list, struct tipc_peer, list);
}

static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
{
    return list_first_entry(&peer->list, struct tipc_peer, list);
}

static struct tipc_peer *peer_head(struct tipc_peer *peer)
{
    while (!peer->is_head)
        peer = peer_prev(peer);
    return peer;
}

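/* get_peer() : look up a peer by node address; called with mon->lock held
 */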
static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
{
    struct tipc_peer *peer;
    unsigned int thash = tipc_hashfn(addr);

    hlist_for_each_entry(peer, &mon->peers[thash], hash) {
        if (peer->addr == addr)
            return peer;
    }
    return NULL;
}

static struct tipc_peer *get_self(struct net *net, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);

    return mon->self;
}

static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
{
    struct tipc_net *tn = tipc_net(net);

    return mon->peer_cnt > tn->mon_threshold;
}

/* mon_identify_lost_members() : identify and mark potentially lost members
 */
static void mon_identify_lost_members(struct tipc_peer *peer,
                      struct tipc_mon_domain *dom_bef,
                      int applied_bef)
{
    struct tipc_peer *member = peer;
    struct tipc_mon_domain *dom_aft = peer->domain;
    int applied_aft = peer->applied;
    int i;

    for (i = 0; i < applied_bef; i++) {
        member = peer_nxt(member);

        /* Do nothing if self or peer already see member as down */
        if (!member->is_up || !map_get(dom_bef->up_map, i))
            continue;

        /* Loss of local node must be detected by active probing */
        if (member->is_local)
            continue;

        /* Start probing if member was removed from applied domain */
        if (!applied_aft || (applied_aft < i)) {
            member->down_cnt = 1;
            continue;
        }

        /* Member loss is confirmed if it is still in applied domain */
        if (!map_get(dom_aft->up_map, i))
            member->down_cnt++;
    }
}

/* mon_apply_domain() : match a peer's domain record against monitor list
 */
static void mon_apply_domain(struct tipc_monitor *mon,
                 struct tipc_peer *peer)
{
    struct tipc_mon_domain *dom = peer->domain;
    struct tipc_peer *member;
    u32 addr;
    int i;

    if (!dom || !peer->is_up)
        return;

    /* Scan across domain members and match against monitor list */
    peer->applied = 0;
    member = peer_nxt(peer);
    for (i = 0; i < dom->member_cnt; i++) {
        addr = dom->members[i];
        if (addr != member->addr)
            return;
        peer->applied++;
        member = peer_nxt(member);
    }
}

/* mon_update_local_domain() : update after peer addition/removal/up/down
 */
static void mon_update_local_domain(struct tipc_monitor *mon)
{
    struct tipc_peer *self = mon->self;
    struct tipc_mon_domain *cache = &mon->cache;
    struct tipc_mon_domain *dom = self->domain;
    struct tipc_peer *peer = self;
    u64 prev_up_map = dom->up_map;
    u16 member_cnt, i;
    bool diff;

    /* Update local domain size based on current size of cluster */
    member_cnt = dom_size(mon->peer_cnt) - 1;
    self->applied = member_cnt;

    /* Update native and cached outgoing local domain records */
    dom->len = dom_rec_len(dom, member_cnt);
    diff = dom->member_cnt != member_cnt;
    dom->member_cnt = member_cnt;
    for (i = 0; i < member_cnt; i++) {
        peer = peer_nxt(peer);
        diff |= dom->members[i] != peer->addr;
        dom->members[i] = peer->addr;
        map_set(&dom->up_map, i, peer->is_up);
        cache->members[i] = mon_cpu_to_le32(peer->addr);
    }
    diff |= dom->up_map != prev_up_map;
    if (!diff)
        return;
    dom->gen = ++mon->dom_gen;
    cache->len = mon_cpu_to_le16(dom->len);
    cache->gen = mon_cpu_to_le16(dom->gen);
    cache->member_cnt = mon_cpu_to_le16(member_cnt);
    cache->up_map = mon_cpu_to_le64(dom->up_map);
    mon_apply_domain(mon, self);
}

/* mon_update_neighbors() : update preceding neighbors of added/removed peer
 */
static void mon_update_neighbors(struct tipc_monitor *mon,
                 struct tipc_peer *peer)
{
    int dz, i;

    dz = dom_size(mon->peer_cnt);
    for (i = 0; i < dz; i++) {
        mon_apply_domain(mon, peer);
        peer = peer_prev(peer);
    }
}

/* mon_assign_roles() : reassign peer roles after a network change
 * The monitor list is consistent at this stage; i.e., each peer is monitoring
 * a set of domain members as matched between domain record and the monitor list
 */
static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
{
    struct tipc_peer *peer = peer_nxt(head);
    struct tipc_peer *self = mon->self;
    int i = 0;

    for (; peer != self; peer = peer_nxt(peer)) {
        peer->is_local = false;

        /* Update domain member */
        if (i++ < head->applied) {
            peer->is_head = false;
            if (head == self)
                peer->is_local = true;
            continue;
        }
        /* Assign next domain head */
        if (!peer->is_up)
            continue;
        if (peer->is_head)
            break;
        head = peer;
        head->is_head = true;
        i = 0;
    }
    mon->list_gen++;
}

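/* tipc_mon_remove_peer() : remove a peer from the monitor list and rebuild
 * the local domain, the neighbor domains and the peer roles accordingly
 */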
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *self;
    struct tipc_peer *peer, *prev, *head;

    if (!mon)
        return;

    self = get_self(net, bearer_id);
    write_lock_bh(&mon->lock);
    peer = get_peer(mon, addr);
    if (!peer)
        goto exit;
    prev = peer_prev(peer);
    list_del(&peer->list);
    hlist_del(&peer->hash);
    kfree(peer->domain);
    kfree(peer);
    mon->peer_cnt--;
    head = peer_head(prev);
    if (head == self)
        mon_update_local_domain(mon);
    mon_update_neighbors(mon, prev);

    /* Revert to full-mesh monitoring if we reach threshold */
    if (!tipc_mon_is_active(net, mon)) {
        list_for_each_entry(peer, &self->list, list) {
            kfree(peer->domain);
            peer->domain = NULL;
            peer->applied = 0;
        }
    }
    mon_assign_roles(mon, head);
exit:
    write_unlock_bh(&mon->lock);
}

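/* tipc_mon_add_peer() : allocate a new peer and sort it into the circular
 * monitor list in ascending address order
 */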
static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
                  struct tipc_peer **peer)
{
    struct tipc_peer *self = mon->self;
    struct tipc_peer *cur, *prev, *p;

    p = kzalloc(sizeof(*p), GFP_ATOMIC);
    *peer = p;
    if (!p)
        return false;
    p->addr = addr;

    /* Add new peer to lookup list */
    INIT_LIST_HEAD(&p->list);
    hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);

    /* Sort new peer into iterator list, in ascending circular order */
    prev = self;
    list_for_each_entry(cur, &self->list, list) {
        if ((addr > prev->addr) && (addr < cur->addr))
            break;
        if (((addr < cur->addr) || (addr > prev->addr)) &&
            (prev->addr > cur->addr))
            break;
        prev = cur;
    }
    list_add_tail(&p->list, &cur->list);
    mon->peer_cnt++;
    mon_update_neighbors(mon, p);
    return true;
}

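/* tipc_mon_peer_up() : a link to the given peer has come up; add the peer
 * if needed and reassign domain heads and roles
 */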
void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *self = get_self(net, bearer_id);
    struct tipc_peer *peer, *head;

    write_lock_bh(&mon->lock);
    peer = get_peer(mon, addr);
    if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
        goto exit;
    peer->is_up = true;
    head = peer_head(peer);
    if (head == self)
        mon_update_local_domain(mon);
    mon_assign_roles(mon, head);
exit:
    write_unlock_bh(&mon->lock);
}

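/* tipc_mon_peer_down() : a link to the given peer has gone down; clear its
 * state and domain record, then reassign domain heads and roles
 */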
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *self;
    struct tipc_peer *peer, *head;
    struct tipc_mon_domain *dom;
    int applied;

    if (!mon)
        return;

    self = get_self(net, bearer_id);
    write_lock_bh(&mon->lock);
    peer = get_peer(mon, addr);
    if (!peer) {
        pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
        goto exit;
    }
    applied = peer->applied;
    peer->applied = 0;
    dom = peer->domain;
    peer->domain = NULL;
    if (peer->is_head)
        mon_identify_lost_members(peer, dom, applied);
    kfree(dom);
    peer->is_up = false;
    peer->is_head = false;
    peer->is_local = false;
    peer->down_cnt = 0;
    head = peer_head(peer);
    if (head == self)
        mon_update_local_domain(mon);
    mon_assign_roles(mon, head);
exit:
    write_unlock_bh(&mon->lock);
}

/* tipc_mon_rcv - process monitor domain event message
 */
void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
          struct tipc_mon_state *state, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_mon_domain *arrv_dom = data;
    struct tipc_mon_domain dom_bef;
    struct tipc_mon_domain *dom;
    struct tipc_peer *peer;
    u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
    int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
    u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
    u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
    u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
    bool probing = state->probing;
    int i, applied_bef;

    state->probing = false;

    /* Sanity check received domain record */
    if (new_member_cnt > MAX_MON_DOMAIN)
        return;
    if (dlen < dom_rec_len(arrv_dom, 0))
        return;
    if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
        return;
    if (dlen < new_dlen || arrv_dlen != new_dlen)
        return;

    /* Synch generation numbers with peer if link just came up */
    if (!state->synched) {
        state->peer_gen = new_gen - 1;
        state->acked_gen = acked_gen;
        state->synched = true;
    }

    if (more(acked_gen, state->acked_gen))
        state->acked_gen = acked_gen;

    /* Drop duplicate unless we are waiting for a probe response */
    if (!more(new_gen, state->peer_gen) && !probing)
        return;

    write_lock_bh(&mon->lock);
    peer = get_peer(mon, addr);
    if (!peer || !peer->is_up)
        goto exit;

    /* Peer is confirmed, stop any ongoing probing */
    peer->down_cnt = 0;

    /* Task is done for duplicate record */
    if (!more(new_gen, state->peer_gen))
        goto exit;

    state->peer_gen = new_gen;

    /* Cache current domain record for later use */
    dom_bef.member_cnt = 0;
    dom = peer->domain;
    if (dom)
        memcpy(&dom_bef, dom, dom->len);

    /* Transform and store received domain record */
    if (!dom || (dom->len < new_dlen)) {
        kfree(dom);
        dom = kmalloc(new_dlen, GFP_ATOMIC);
        peer->domain = dom;
        if (!dom)
            goto exit;
    }
    dom->len = new_dlen;
    dom->gen = new_gen;
    dom->member_cnt = new_member_cnt;
    dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
    for (i = 0; i < new_member_cnt; i++)
        dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);

    /* Update peers affected by this domain record */
    applied_bef = peer->applied;
    mon_apply_domain(mon, peer);
    mon_identify_lost_members(peer, &dom_bef, applied_bef);
    mon_assign_roles(mon, peer_head(peer));
exit:
    write_unlock_bh(&mon->lock);
}

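/* tipc_mon_prep() : prepare the outgoing domain record for a link; a dummy
 * record carrying only an acknowledge is used if the peer has already acked
 * the current domain generation
 */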
void tipc_mon_prep(struct net *net, void *data, int *dlen,
           struct tipc_mon_state *state, int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_mon_domain *dom = data;
    u16 gen = mon->dom_gen;
    u16 len;

    /* Send invalid record if not active */
    if (!tipc_mon_is_active(net, mon)) {
        dom->len = 0;
        return;
    }

    /* Send only a dummy record with ack if peer has acked our last sent */
    if (likely(state->acked_gen == gen)) {
        len = dom_rec_len(dom, 0);
        *dlen = len;
        dom->len = mon_cpu_to_le16(len);
        dom->gen = mon_cpu_to_le16(gen);
        dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
        dom->member_cnt = 0;
        return;
    }
    /* Send the full record */
    read_lock_bh(&mon->lock);
    len = mon_le16_to_cpu(mon->cache.len);
    *dlen = len;
    memcpy(data, &mon->cache, len);
    read_unlock_bh(&mon->lock);
    dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
}

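/* tipc_mon_get_state() : update the per-link monitor state; tells the link
 * whether it should actively probe its peer and whether a reset is needed
 */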
void tipc_mon_get_state(struct net *net, u32 addr,
            struct tipc_mon_state *state,
            int bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *peer;

    if (!tipc_mon_is_active(net, mon)) {
        state->probing = false;
        state->monitoring = true;
        return;
    }

    /* Use cached state if table has not changed */
    if (!state->probing &&
        (state->list_gen == mon->list_gen) &&
        (state->acked_gen == mon->dom_gen))
        return;

    read_lock_bh(&mon->lock);
    peer = get_peer(mon, addr);
    if (peer) {
        state->probing = state->acked_gen != mon->dom_gen;
        state->probing |= peer->down_cnt;
        state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
        state->monitoring = peer->is_local;
        state->monitoring |= peer->is_head;
        state->list_gen = mon->list_gen;
    }
    read_unlock_bh(&mon->lock);
}

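/* mon_timeout() : re-evaluate the own domain size at regular intervals, in
 * case the cluster has grown or shrunk since the last update
 */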
static void mon_timeout(struct timer_list *t)
{
    struct tipc_monitor *mon = from_timer(mon, t, timer);
    struct tipc_peer *self;
    int best_member_cnt = dom_size(mon->peer_cnt) - 1;

    write_lock_bh(&mon->lock);
    self = mon->self;
    if (self && (best_member_cnt != self->applied)) {
        mon_update_local_domain(mon);
        mon_assign_roles(mon, self);
    }
    write_unlock_bh(&mon->lock);
    mod_timer(&mon->timer, jiffies + mon->timer_intv);
}

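/* tipc_mon_create() : create and start the monitor instance for a bearer
 */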
int tipc_mon_create(struct net *net, int bearer_id)
{
    struct tipc_net *tn = tipc_net(net);
    struct tipc_monitor *mon;
    struct tipc_peer *self;
    struct tipc_mon_domain *dom;

    if (tn->monitors[bearer_id])
        return 0;

    mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
    self = kzalloc(sizeof(*self), GFP_ATOMIC);
    dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
    if (!mon || !self || !dom) {
        kfree(mon);
        kfree(self);
        kfree(dom);
        return -ENOMEM;
    }
    tn->monitors[bearer_id] = mon;
    rwlock_init(&mon->lock);
    mon->net = net;
    mon->peer_cnt = 1;
    mon->self = self;
    self->domain = dom;
    self->addr = tipc_own_addr(net);
    self->is_up = true;
    self->is_head = true;
    INIT_LIST_HEAD(&self->list);
    timer_setup(&mon->timer, mon_timeout, 0);
    mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
    mod_timer(&mon->timer, jiffies + mon->timer_intv);
    return 0;
}

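/* tipc_mon_delete() : stop and tear down the monitor instance of a bearer
 */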
void tipc_mon_delete(struct net *net, int bearer_id)
{
    struct tipc_net *tn = tipc_net(net);
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *self;
    struct tipc_peer *peer, *tmp;

    if (!mon)
        return;

    self = get_self(net, bearer_id);
    write_lock_bh(&mon->lock);
    tn->monitors[bearer_id] = NULL;
    list_for_each_entry_safe(peer, tmp, &self->list, list) {
        list_del(&peer->list);
        hlist_del(&peer->hash);
        kfree(peer->domain);
        kfree(peer);
    }
    mon->self = NULL;
    write_unlock_bh(&mon->lock);
    del_timer_sync(&mon->timer);
    kfree(self->domain);
    kfree(self);
    kfree(mon);
}

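/* tipc_mon_reinit_self() : update the own node address in all monitor
 * instances after it has changed
 */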
void tipc_mon_reinit_self(struct net *net)
{
    struct tipc_monitor *mon;
    int bearer_id;

    for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
        mon = tipc_monitor(net, bearer_id);
        if (!mon)
            continue;
        write_lock_bh(&mon->lock);
        mon->self->addr = tipc_own_addr(net);
        write_unlock_bh(&mon->lock);
    }
}

int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
    struct tipc_net *tn = tipc_net(net);

    if (cluster_size > TIPC_CLUSTER_SIZE)
        return -EINVAL;

    tn->mon_threshold = cluster_size;

    return 0;
}

int tipc_nl_monitor_get_threshold(struct net *net)
{
    struct tipc_net *tn = tipc_net(net);

    return tn->mon_threshold;
}

static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
                      struct tipc_nl_msg *msg)
{
    struct tipc_mon_domain *dom = peer->domain;
    struct nlattr *attrs;
    void *hdr;

    hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
              NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
    if (!hdr)
        return -EMSGSIZE;

    attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
    if (!attrs)
        goto msg_full;

    if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
        goto attr_msg_full;

    if (peer->is_up)
        if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
            goto attr_msg_full;
    if (peer->is_local)
        if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
            goto attr_msg_full;
    if (peer->is_head)
        if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
            goto attr_msg_full;

    if (dom) {
        if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
            goto attr_msg_full;
        if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
                      dom->up_map, TIPC_NLA_MON_PEER_PAD))
            goto attr_msg_full;
        if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
                dom->member_cnt * sizeof(u32), &dom->members))
            goto attr_msg_full;
    }

    nla_nest_end(msg->skb, attrs);
    genlmsg_end(msg->skb, hdr);
    return 0;

attr_msg_full:
    nla_nest_cancel(msg->skb, attrs);
msg_full:
    genlmsg_cancel(msg->skb, hdr);

    return -EMSGSIZE;
}

int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
                 u32 bearer_id, u32 *prev_node)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    struct tipc_peer *peer;

    if (!mon)
        return -EINVAL;

    read_lock_bh(&mon->lock);
    peer = mon->self;
    do {
        if (*prev_node) {
            if (peer->addr == *prev_node)
                *prev_node = 0;
            else
                continue;
        }
        if (__tipc_nl_add_monitor_peer(peer, msg)) {
            *prev_node = peer->addr;
            read_unlock_bh(&mon->lock);
            return -EMSGSIZE;
        }
    } while ((peer = peer_nxt(peer)) != mon->self);
    read_unlock_bh(&mon->lock);

    return 0;
}

int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
              u32 bearer_id)
{
    struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    char bearer_name[TIPC_MAX_BEARER_NAME];
    struct nlattr *attrs;
    void *hdr;
    int ret;

    ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
    if (ret || !mon)
        return 0;

    hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
              NLM_F_MULTI, TIPC_NL_MON_GET);
    if (!hdr)
        return -EMSGSIZE;

    attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
    if (!attrs)
        goto msg_full;

    read_lock_bh(&mon->lock);
    if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
        goto attr_msg_full;
    if (tipc_mon_is_active(net, mon))
        if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
            goto attr_msg_full;
    if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
        goto attr_msg_full;

    read_unlock_bh(&mon->lock);
    nla_nest_end(msg->skb, attrs);
    genlmsg_end(msg->skb, hdr);

    return 0;

attr_msg_full:
    read_unlock_bh(&mon->lock);
    nla_nest_cancel(msg->skb, attrs);
msg_full:
    genlmsg_cancel(msg->skb, hdr);

    return -EMSGSIZE;
}