// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */
#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
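
/**
 * batadv_send_skb_packet() - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address on the
 * given hard interface. The skb is consumed in any case.
 *
 * Return: NET_XMIT_DROP when the packet could not be prepared or the interface
 * is not usable, otherwise the result of dev_queue_xmit() as evaluated by
 * net_xmit_eval().
 */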
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	ret = dev_queue_xmit(skb);
	return net_xmit_eval(ret);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
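
/**
 * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
 * @skb: packet to be transmitted (with batadv header and no outer eth header)
 * @hard_iface: outgoing interface
 *
 * Return: NET_XMIT_DROP on failure, the result of the lower send routine
 * otherwise. The skb is consumed in any case.
 */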
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}
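
/**
 * batadv_send_unicast_skb() - Send unicast packet to neighbor
 * @skb: packet to be transmitted (with batadv header and no outer eth header)
 * @neigh: neighbor which is used as destination
 *
 * Additionally updates the B.A.T.M.A.N. V last_unicast_tx timestamp of the
 * neighbor on success.
 *
 * Return: NET_XMIT_DROP on failure, the result of the lower send routine
 * otherwise. The skb is consumed in any case.
 */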
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}
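
/**
 * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb
 * @skb: Packet to be transmitted
 * @orig_node: Final destination of the packet
 * @recv_if: Interface used when receiving the packet (can be NULL)
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */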
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it instead.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
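
/**
 * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */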
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
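
/**
 * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */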
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
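
/**
 * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */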
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	batadv_hardif_put(primary_if);
	return ret;
}
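
/**
 * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node. The skb is
 * consumed in any case.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_to_orig() otherwise.
 */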
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	/* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);
	return ret;
}
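
/**
 * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_unicast() otherwise.
 */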
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	batadv_orig_node_put(orig_node);

	return ret;
}
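
/**
 * batadv_send_skb_via_gw() - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast-4addr header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_unicast() otherwise.
 */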
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	batadv_orig_node_put(orig_node);

	return ret;
}
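
/**
 * batadv_forw_packet_free() - free a forwarding packet
 * @forw_packet: the packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */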
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	batadv_hardif_put(forw_packet->if_incoming);
	batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}
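
/**
 * batadv_forw_packet_alloc() - allocate a forwarding packet
 * @if_incoming: the (optional) if_incoming to be grabbed
 * @if_outgoing: the (optional) if_outgoing to be grabbed
 * @queue_left: the (optional) queue counter to decrease
 * @bat_priv: the bat priv for the mesh of this forw_packet
 * @skb: the raw packet this forwarding packet shall contain
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */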
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv,
			 struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = skb;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}
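
/**
 * batadv_forw_packet_was_stolen() - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */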
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}
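
/**
 * batadv_forw_packet_steal() - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stolen, false otherwise.
 */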
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}
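
/**
 * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
 * @forw_list: the to be stolen forward packets
 * @cleanup_list: a backup pointer, to be able to dispose the packets later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forward packets on all hard
 * interfaces will be claimed.
 *
 * The packets are being moved from the forw_list to the cleanup_list. This
 * makes it possible for already running threads to notice the claim.
 */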
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}
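
/**
 * batadv_forw_packet_list_free() - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees this list of forw_packets.
 *
 * This function might sleep.
 */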
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}
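
/**
 * batadv_forw_packet_queue() - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the according interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 */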
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without a (re)queue, then
		 * don't get into the mess of requeuing :)
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}
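
/**
 * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 */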
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}
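
/**
 * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 */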
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}
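
/**
 * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmission
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 * @if_in: the interface where the packet was received on
 * @if_out: the outgoing interface to queue on
 *
 * Adds a broadcast packet to the queue and sets up a timer. This call clones
 * the given skb, hence the caller needs to take into account that the data
 * segment of the original skb might not be modifiable anymore.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */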
static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv,
					    struct sk_buff *skb,
					    unsigned long delay,
					    bool own_packet,
					    struct batadv_hard_iface *if_in,
					    struct batadv_hard_iface *if_out)
{
	struct batadv_forw_packet *forw_packet;
	unsigned long send_time = jiffies;
	struct sk_buff *newskb;

	newskb = skb_clone(skb, GFP_ATOMIC);
	if (!newskb)
		goto err;

	forw_packet = batadv_forw_packet_alloc(if_in, if_out,
					       &bat_priv->bcast_queue_left,
					       bat_priv, newskb);
	if (!forw_packet)
		goto err_packet_free;

	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	send_time += delay ? delay : msecs_to_jiffies(5);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time);
	return NETDEV_TX_OK;

err_packet_free:
	kfree_skb(newskb);
err:
	return NETDEV_TX_BUSY;
}
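
/**
 * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 * @if_in: the interface where the packet was received on
 * @if_out: the outgoing interface to forward to
 *
 * Transmits a broadcast packet on the specified interface either immediately
 * or if a delay is given after that. Furthermore, queues additional
 * retransmissions according to the interface's num_bcasts setting.
 *
 * This call clones the given skb, hence the caller needs to take into
 * account that the data segment of the original skb might not be
 * modifiable anymore.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */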
static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv,
				       struct sk_buff *skb,
				       unsigned long delay,
				       bool own_packet,
				       struct batadv_hard_iface *if_in,
				       struct batadv_hard_iface *if_out)
{
	unsigned int num_bcasts = if_out->num_bcasts;
	struct sk_buff *newskb;
	int ret = NETDEV_TX_OK;

	if (!delay) {
		newskb = skb_clone(skb, GFP_ATOMIC);
		if (!newskb)
			return NETDEV_TX_BUSY;

		batadv_send_broadcast_skb(newskb, if_out);
		num_bcasts--;
	}

	/* delayed broadcast or rebroadcasts? */
	if (num_bcasts >= 1) {
		BATADV_SKB_CB(skb)->num_bcasts = num_bcasts;

		ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,
						       own_packet, if_in,
						       if_out);
	}

	return ret;
}
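
/**
 * batadv_send_no_broadcast() - check whether (re)broadcast is necessary
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to check
 * @own_packet: true if it is a self-generated broadcast packet
 * @if_out: the outgoing interface checked and considered for (re)broadcast
 *
 * Return: False if a packet needs to be (re)broadcasted on the given
 * interface, true otherwise.
 */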
static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv,
				     struct sk_buff *skb, bool own_packet,
				     struct batadv_hard_iface *if_out)
{
	struct batadv_hardif_neigh_node *neigh_node = NULL;
	struct batadv_bcast_packet *bcast_packet;
	u8 *orig_neigh;
	u8 *neigh_addr;
	char *type;
	int ret;

	if (!own_packet) {
		neigh_addr = eth_hdr(skb)->h_source;
		neigh_node = batadv_hardif_neigh_get(if_out,
						     neigh_addr);
	}

	bcast_packet = (struct batadv_bcast_packet *)skb->data;
	orig_neigh = neigh_node ? neigh_node->orig : NULL;

	ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig,
					 orig_neigh);

	batadv_hardif_neigh_put(neigh_node);

	/* ok, may broadcast */
	if (!ret)
		return false;

	/* no broadcast */
	switch (ret) {
	case BATADV_HARDIF_BCAST_NORECIPIENT:
		type = "no neighbor";
		break;
	case BATADV_HARDIF_BCAST_DUPFWD:
		type = "single neighbor is source";
		break;
	case BATADV_HARDIF_BCAST_DUPORIG:
		type = "single neighbor is originator";
		break;
	default:
		type = "unknown";
	}

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "BCAST packet from orig %pM on %s suppressed: %s\n",
		   bcast_packet->orig,
		   if_out->net_dev->name, type);

	return true;
}
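
/**
 * __batadv_forw_bcast_packet() - forward and queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Transmits a broadcast packet on all suitable hard interfaces, either
 * immediately or, if a delay is given, after that. Interfaces for which
 * batadv_send_no_broadcast() indicates suppression are skipped.
 *
 * This call clones the given skb, hence the caller needs to take into
 * account that the data segment of the given skb might not be
 * modifiable anymore.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */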
static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
				      struct sk_buff *skb,
				      unsigned long delay,
				      bool own_packet)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hard_iface *primary_if;
	int ret = NETDEV_TX_OK;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return NETDEV_TX_BUSY;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		if (batadv_send_no_broadcast(bat_priv, skb, own_packet,
					     hard_iface)) {
			batadv_hardif_put(hard_iface);
			continue;
		}

		ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,
						  own_packet, primary_if,
						  hard_iface);
		batadv_hardif_put(hard_iface);

		if (ret == NETDEV_TX_BUSY)
			break;
	}
	rcu_read_unlock();

	batadv_hardif_put(primary_if);
	return ret;
}
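
/**
 * batadv_forw_bcast_packet() - forward and queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Transmits a broadcast packet either immediately or if a delay is given
 * after that. The given skb is not consumed; only clones of it are queued
 * and sent.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */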
int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
			     struct sk_buff *skb,
			     unsigned long delay,
			     bool own_packet)
{
	return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
}
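
/**
 * batadv_send_bcast_packet() - send and queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Transmits a broadcast packet either immediately or if a delay is given
 * after that. Consumes the provided skb.
 */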
void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
			      struct sk_buff *skb,
			      unsigned long delay,
			      bool own_packet)
{
	__batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
	consume_skb(skb);
}
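
/**
 * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
 * @forw_packet: the forwarding packet to check
 *
 * Checks whether a given packet has any (re)transmissions left on the
 * provided interface.
 *
 * Return: True if (re)transmissions are left, false otherwise.
 */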
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet)
{
	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
}
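
/**
 * batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a
 *  packet
 * @forw_packet: the packet to decrease the counter for
 */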
static void
batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet)
{
	BATADV_SKB_CB(forw_packet->skb)->num_bcasts--;
}
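
/**
 * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
 * @forw_packet: the packet to check
 *
 * Return: True if this packet was transmitted before, false otherwise.
 */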
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
	unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts;

	return num_bcasts != forw_packet->if_outgoing->num_bcasts;
}
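
/**
 * batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet
 * @work: work queue item
 *
 * Transmits a queued broadcast packet on the outgoing interface and requeues
 * it for further retransmissions if any are left. Otherwise the forw_packet
 * is claimed and freed, unless a purge routine got to it first.
 */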
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	struct batadv_forw_packet *forw_packet;
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct sk_buff *skb1;
	bool dropped = false;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	/* send a copy of the saved skb */
	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (!skb1)
		goto out;

	batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing);
	batadv_forw_packet_bcasts_dec(forw_packet);

	if (batadv_forw_packet_bcasts_left(forw_packet)) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}
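
/**
 * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/OGM packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */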
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s(): %s\n",
			   __func__, hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s()\n", __func__);

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}