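// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 */
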
#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
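
/**
 * batadv_choose_claim() - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */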
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	const struct batadv_bla_claim *claim = data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}
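
/**
 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */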
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_backbone_gw *gw;
	u32 hash = 0;

	gw = data;
	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
	hash = jhash(&gw->vid, sizeof(gw->vid), hash);

	return hash % size;
}
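
/**
 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */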
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return false;

	if (gw1->vid != gw2->vid)
		return false;

	return true;
}
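
/**
 * batadv_compare_claim() - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */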
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return false;

	if (cl1->vid != cl2->vid)
		return false;

	return true;
}
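
/**
 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */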
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}
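
/**
 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */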
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (!backbone_gw)
		return;

	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}
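
/**
 * batadv_claim_release() - release claim from lists and queue for free after
 *  rcu grace period
 * @ref: kref pointer of the claim
 */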
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

	kfree_rcu(claim, rcu);
}
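
/**
 * batadv_claim_put() - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */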
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	if (!claim)
		return;

	kref_put(&claim->refcount, batadv_claim_release);
}
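
/**
 * batadv_claim_hash_find() - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * Return: claim if found or NULL otherwise.
 */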
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
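
/**
 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */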
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
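
/**
 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */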
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}
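
/**
 * batadv_bla_send_claim() - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */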
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): CLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
			   ethhdr->h_source, batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
			   ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
			   __func__, ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));

		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	netif_rx(skb);
out:
	batadv_hardif_put(primary_if);
}
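
/**
 * batadv_bla_loopdetect_report() - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */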
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    batadv_print_vid(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 batadv_print_vid(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}
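
/**
 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */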
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): not found (%pM, %d), creating new entry\n", __func__,
		   orig, batadv_print_vid(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
	kref_init(&entry->refcount);

	kref_get(&entry->refcount);
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}
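
/**
 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */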
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}
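
/**
 * batadv_bla_answer_request() - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */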
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): received a claim request, send all of our own claims again\n",
		   __func__);

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}
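
/**
 * batadv_bla_send_request() - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */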
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
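
/**
 * batadv_bla_send_announce() - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */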
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
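
/**
 * batadv_bla_add_claim() - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */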
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_backbone_gw *old_backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	bool remove_crc = false;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		spin_lock_init(&claim->backbone_lock);
		claim->vid = vid;
		claim->lasttime = jiffies;
		kref_get(&backbone_gw->refcount);
		claim->backbone_gw = backbone_gw;
		kref_init(&claim->refcount);

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): adding new entry %pM, vid %d to hash ...\n",
			   __func__, mac, batadv_print_vid(vid));

		kref_get(&claim->refcount);
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* hash failed, free the structure */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): changing ownership for %pM, vid %d to gw %pM\n",
			   __func__, mac, batadv_print_vid(vid),
			   backbone_gw->orig);

		remove_crc = true;
	}

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;
	spin_unlock_bh(&claim->backbone_lock);

	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}

	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}
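
/**
 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */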
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}
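
/**
 * batadv_bla_del_claim() - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */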
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;
	struct batadv_bla_claim *claim_removed_entry;
	struct hlist_node *claim_removed_node;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
		   mac, batadv_print_vid(vid));

	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
						batadv_compare_claim,
						batadv_choose_claim, claim);
	if (!claim_removed_node)
		goto free_claim;

	/* reference from the hash is gone */
	claim_removed_entry = hlist_entry(claim_removed_node,
					  struct batadv_bla_claim, hash_entry);
	batadv_claim_put(claim_removed_entry);

free_claim:
	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}
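
/**
 * batadv_handle_announce() - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */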
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__force __be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   __func__, backbone_gw->orig,
			   batadv_print_vid(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
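
/**
 * batadv_handle_request() - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */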
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): REQUEST vid %d (sent by %pM)...\n",
		   __func__, batadv_print_vid(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}
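
/**
 * batadv_handle_unclaim() - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */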
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  const u8 *backbone_addr, const u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
		   claim_addr, batadv_print_vid(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}
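
/**
 * batadv_handle_claim() - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */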
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				const u8 *backbone_addr, const u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
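
/**
 * batadv_check_claim_group() - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it is on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */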
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if the claim packet is from a backbone gateway,
	 * we have to take the group id from the source
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}
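
/**
 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */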
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   __func__, ethhdr->h_source, batadv_print_vid(vid),
			   hw_src, hw_dst);

	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
		   hw_dst);
	return true;
}
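
/**
 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */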
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "%s(): backbone gw %pM timed out\n",
				   __func__, backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
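
/**
 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case
 * of a time out, or clean all claims if now is set.
 */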
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): timed out.\n", __func__);

purge_now:
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): %pM, vid %d\n", __func__,
				   claim->addr, claim->vid);

			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}
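
/**
 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */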
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
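
/**
 * batadv_bla_send_loopdetect() - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other hosts and won't be claimed.
 */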
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}
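
/**
 * batadv_bla_status_update() - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */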
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}
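
/**
 * batadv_bla_periodic_work() - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */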
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	bool send_loopdetect = false;
	int i;

	delayed_work = to_delayed_work(work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		eth_random_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * Reset it once enough wait periods have passed so
			 * that traffic is allowed again.
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
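
/* The claim hash and the backbone hash would receive the same lock class key
 * because both are allocated by hash_new with the same size. They are
 * reinitialized with different keys to allow nested locking without
 * generating lockdep warnings.
 */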
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
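
/**
 * batadv_bla_init() - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */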
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	if (!bat_priv->bla.claim_hash)
		return -ENOMEM;

	bat_priv->bla.backbone_hash = batadv_hash_new(32);
	if (!bat_priv->bla.backbone_hash) {
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		return -ENOMEM;
	}

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
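
/**
 * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked
 * @payload_ptr: pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 * @orig: originator mac address, NULL if unknown
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */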
static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
				     struct sk_buff *skb, u8 *payload_ptr,
				     const u8 *orig)
{
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;
	int i, curr;
	__be32 crc;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, payload_ptr);

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* are the originators both known and not anonymous? */
		if (orig && !is_zero_ether_addr(orig) &&
		    !is_zero_ether_addr(entry->orig)) {
			/* If known, check if the new frame came from
			 * the same originator:
			 * We are still looking for duplicates of the
			 * same origin only, so if the same frame was
			 * received multiple times from the same orig
			 * then it is still considered no duplicate.
			 *
			 * Hence, if the same frame is received by
			 * different origs then it is a duplicate.
			 */
			if (batadv_compare_eth(entry->orig, orig))
				continue;
		}

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return true to forbid it.
		 */
		ret = true;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, its the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;

	/* known originator */
	if (orig)
		ether_addr_copy(entry->orig, orig);
	/* anonymous originator */
	else
		eth_zero_addr(entry->orig);

	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
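
/**
 * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked, decapsulated from a
 *  unicast_packet
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */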
static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
					   struct sk_buff *skb)
{
	return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
}
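
/**
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */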
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	struct batadv_bcast_packet *bcast_packet;
	u8 *payload_ptr;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;
	payload_ptr = (u8 *)(bcast_packet + 1);

	return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
					bcast_packet->orig);
}
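
/**
 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
 *  the VLAN identified by vid.
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */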
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}
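
/**
 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a
 *  VLAN.
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */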
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return false;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return false;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return false;

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
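
/**
 * batadv_bla_free() - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */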
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	batadv_hardif_put(primary_if);
}
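
/**
 * batadv_bla_loopdetect_check() - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * throws an uevent and logs the event if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */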
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	bool ret;

	ethhdr = eth_hdr(skb);

	/* Only check for the MAC address and skip more checks here for
	 * performance reasons - this function is on the hotpath, after all.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that as loop. It might be a coincidence.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);

	/* backbone_gw is unreferenced in the report work function
	 * if queue_work() call was successful
	 */
	if (!ret)
		batadv_backbone_gw_put(backbone_gw);

	return true;
}
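
/**
 * batadv_bla_rx() - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @packet_type: the batman packet type this frame came in
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * In these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */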
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, int packet_type)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow multicast packets while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			/* Both broadcast flooding or multicast-via-unicast
			 * delivery might send to multiple backbone gateways
			 * sharing the same LAN and therefore need to
			 * coordinate which backbone gateway forwards into the
			 * LAN, to avoid duplicated packets. While coordination
			 * is not easily feasible without a CLAIM mechanism,
			 * especially prevent a loop by dropping such packets
			 * without a CLAIM.
			 */
			if (packet_type == BATADV_BCAST ||
			    packet_type == BATADV_UNICAST)
				goto handled;

	/* potential duplicates from foreign BLA backbone gateways via
	 * multicast-in-unicast packets
	 */
	if (is_multicast_ether_addr(ethhdr->h_dest) &&
	    packet_type == BATADV_UNICAST &&
	    batadv_bla_check_ucast_duplist(bat_priv, skb))
		goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
			   __func__, ethhdr->h_source,
			   batadv_is_my_client(bat_priv,
					       ethhdr->h_source, vid) ?
			   "yes" : "no");
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a multicast frame ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) &&
	    (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check packet type because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using unicast packets.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	batadv_hardif_put(primary_if);
	batadv_claim_put(claim);
	return ret;
}
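
/**
 * batadv_bla_tx() - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function.
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */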
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	bool client_roamed;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		if (batadv_has_timed_out(claim->lasttime, 100)) {
			/* only unclaim if the last claim entry is
			 * older than 100 ms to make sure we really
			 * have a roaming client here.
			 */
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
				   __func__, ethhdr->h_source);
			batadv_handle_unclaim(bat_priv, primary_if,
					      primary_if->net_dev->dev_addr,
					      ethhdr->h_source, vid);
			goto allow;
		} else {
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
				   __func__, ethhdr->h_source);
			goto handled;
		}
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;
handled:
	ret = true;
out:
	batadv_hardif_put(primary_if);
	batadv_claim_put(claim);
	return ret;
}
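
/**
 * batadv_bla_claim_dump_entry() - dump one entry of the claim table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */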
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
			    struct netlink_callback *cb,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	const u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	genl_dump_check_consistent(cb, hdr);

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}
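
/**
 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 if the bucket was dumped completely, a non-zero value otherwise.
 */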
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
			     struct netlink_callback *cb,
			     struct batadv_hard_iface *primary_if,
			     struct batadv_hashtable *hash, unsigned int bucket,
			     int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;
	int ret = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_claim_dump_entry(msg, portid, cb,
						  primary_if, claim);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	spin_unlock_bh(&hash->list_locks[bucket]);
	return ret;
}
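
/**
 * batadv_bla_claim_dump() - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */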
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
						 hash, bucket, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	batadv_hardif_put(primary_if);

	dev_put(soft_iface);

	return ret;
}
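
/**
 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */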
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	const u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_BACKBONE);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	genl_dump_check_consistent(cb, hdr);

	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}
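
/**
 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 if the bucket was dumped completely, a non-zero value otherwise.
 */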
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
				struct netlink_callback *cb,
				struct batadv_hard_iface *primary_if,
				struct batadv_hashtable *hash,
				unsigned int bucket, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;
	int ret = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
						     primary_if, backbone_gw);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	spin_unlock_bh(&hash->list_locks[bucket]);
	return ret;
}
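
/**
 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */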
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
						    hash, bucket, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	batadv_hardif_put(primary_if);

	dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DAT
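/**
 * batadv_bla_check_claim() - check if address is claimed
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: mac address of which the claim status is checked
 * @vid: the VLAN ID
 *
 * addr is checked if this address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by the device,
 * false if the device addr is already claimed by another gateway
 */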
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
			    u8 *addr, unsigned short vid)
{
	struct batadv_bla_claim search_claim;
	struct batadv_bla_claim *claim = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return ret;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return ret;

	/* First look if the mac address is claimed */
	ether_addr_copy(search_claim.addr, addr);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* If there is a claim and we are not owner of the claim,
	 * return false.
	 */
	if (claim) {
		if (!batadv_compare_eth(claim->backbone_gw->orig,
					primary_if->net_dev->dev_addr))
			ret = false;
		batadv_claim_put(claim);
	}

	batadv_hardif_put(primary_if);
	return ret;
}
#endif