0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0015
0016 #include <linux/slab.h>
0017 #include <linux/kmemleak.h>
0018 #include <linux/types.h>
0019 #include <linux/kernel.h>
0020 #include <linux/module.h>
0021 #include <linux/socket.h>
0022 #include <linux/netdevice.h>
0023 #include <linux/proc_fs.h>
0024 #ifdef CONFIG_SYSCTL
0025 #include <linux/sysctl.h>
0026 #endif
0027 #include <linux/times.h>
0028 #include <net/net_namespace.h>
0029 #include <net/neighbour.h>
0030 #include <net/arp.h>
0031 #include <net/dst.h>
0032 #include <net/sock.h>
0033 #include <net/netevent.h>
0034 #include <net/netlink.h>
0035 #include <linux/rtnetlink.h>
0036 #include <linux/random.h>
0037 #include <linux/string.h>
0038 #include <linux/log2.h>
0039 #include <linux/inetdevice.h>
0040 #include <net/addrconf.h>
0041
0042 #include <trace/events/neigh.h>
0043
0044 #define NEIGH_DEBUG 1
0045 #define neigh_dbg(level, fmt, ...) \
0046 do { \
0047 if (level <= NEIGH_DEBUG) \
0048 pr_debug(fmt, ##__VA_ARGS__); \
0049 } while (0)
0050
0051 #define PNEIGH_HASHMASK 0xF
0052
0053 static void neigh_timer_handler(struct timer_list *t);
0054 static void __neigh_notify(struct neighbour *n, int type, int flags,
0055 u32 pid);
0056 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
0057 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
0058 struct net_device *dev);
0059
0060 #ifdef CONFIG_PROC_FS
0061 static const struct seq_operations neigh_stat_seq_ops;
0062 #endif
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
/* Output handler installed on dead/unusable entries: drop the packet
 * and tell the caller the destination is unreachable.
 */
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
0097
/* Tear down a neighbour that has been unlinked from the table: emit the
 * tracepoint, notify netlink listeners (RTM_DELNEIGH) and netevent
 * subscribers, then drop the table's reference (which may free it).
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
0105
0106
0107
0108
0109
0110
0111
/* Pick a randomized reachable time in [base/2, 3*base/2) so that
 * neighbour entries do not all expire and re-probe in lock step.
 * A zero @base yields 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
0117
/* Flag @n as dead and unlink it from the table's gc and managed lists.
 * Callers hold tbl->lock (which protects those lists) and n->lock.
 */
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}
0128
/* Re-evaluate whether @n should sit on the table's gc list after a
 * state/flags change, keeping gc_entries in sync.  Lock order matters:
 * tbl->lock before n->lock, matching the rest of this file.
 */
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* Permanent and externally learned entries are never garbage
	 * collected, so they must stay off the gc list.
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add to the tail so older entries are reclaimed first */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
0157
0158 static void neigh_update_managed_list(struct neighbour *n)
0159 {
0160 bool on_managed_list, add_to_managed;
0161
0162 write_lock_bh(&n->tbl->lock);
0163 write_lock(&n->lock);
0164 if (n->dead)
0165 goto out;
0166
0167 add_to_managed = n->flags & NTF_MANAGED;
0168 on_managed_list = !list_empty(&n->managed_list);
0169
0170 if (!add_to_managed && on_managed_list)
0171 list_del_init(&n->managed_list);
0172 else if (add_to_managed && !on_managed_list)
0173 list_add_tail(&n->managed_list, &n->tbl->managed_list);
0174 out:
0175 write_unlock(&n->lock);
0176 write_unlock_bh(&n->tbl->lock);
0177 }
0178
0179 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
0180 bool *gc_update, bool *managed_update)
0181 {
0182 u32 ndm_flags, old_flags = neigh->flags;
0183
0184 if (!(flags & NEIGH_UPDATE_F_ADMIN))
0185 return;
0186
0187 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
0188 ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
0189
0190 if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
0191 if (ndm_flags & NTF_EXT_LEARNED)
0192 neigh->flags |= NTF_EXT_LEARNED;
0193 else
0194 neigh->flags &= ~NTF_EXT_LEARNED;
0195 *notify = 1;
0196 *gc_update = true;
0197 }
0198 if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
0199 if (ndm_flags & NTF_MANAGED)
0200 neigh->flags |= NTF_MANAGED;
0201 else
0202 neigh->flags &= ~NTF_MANAGED;
0203 *notify = 1;
0204 *managed_update = true;
0205 }
0206 }
0207
/* Unlink @n from the hash chain slot @np if only the table still holds
 * a reference to it.  Caller holds tbl->lock.  Returns true when the
 * entry was unlinked, marked dead and released.
 */
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	/* refcnt == 1 means the hash table is the sole owner */
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
0228
/* Find @ndel in @tbl's hash table and try to delete it via neigh_del().
 * Caller holds tbl->lock.  Returns true only if the entry was found and
 * had no references besides the table's own.
 */
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	/* walk the chain looking for the slot that points at ndel */
	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
0251
/* Synchronous shrink of gc-eligible entries, invoked from the allocation
 * path when the table crosses gc_thresh2/gc_thresh3.  Only unreferenced
 * entries that are FAILED, NOARP, multicast-keyed, or idle for more than
 * 5 seconds are removed.  Returns the number of entries freed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	/* clean at most enough to get back under gc_thresh2 */
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			/* neigh_remove_one() re-checks refcnt under lock */
			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
0289
/* Arm the per-neighbour timer for @when and take a reference for it;
 * the reference is dropped by the timer handler or neigh_del_timer().
 * The timer must not already be pending (mod_timer() returning nonzero
 * means it was, which indicates a refcounting bug).
 */
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
0299
/* Cancel a pending neighbour timer and drop the reference it held.
 * Returns 1 if a pending timer was actually deleted, 0 otherwise.
 */
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
0309
/* Drop every proxy skb queued on @list that belongs to @net (all of
 * them when @net is NULL).  Matching skbs are moved to a private list
 * under the queue lock and freed afterwards outside of it.
 */
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
{
	struct sk_buff_head tmp;
	unsigned long flags;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);
	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	while (skb != NULL) {
		struct sk_buff *skb_next = skb_peek_next(skb, list);
		struct net_device *dev = skb->dev;

		if (net == NULL || net_eq(dev_net(dev), net)) {
			struct in_device *in_dev;

			/* keep the per-device proxy queue length in sync */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			if (in_dev)
				in_dev->arp_parms->qlen--;
			rcu_read_unlock();
			__skb_unlink(skb, list);
			__skb_queue_tail(&tmp, skb);
		}
		skb = skb_next;
	}
	spin_unlock_irqrestore(&list->lock, flags);

	/* each queued skb holds a device reference; release it too */
	while ((skb = __skb_dequeue(&tmp))) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
0343
/* Remove from @tbl every entry belonging to @dev (all devices when @dev
 * is NULL), optionally sparing NUD_PERMANENT entries.  Caller holds
 * tbl->lock.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			/* unlink from the chain, then neutralize */
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The entry is still referenced elsewhere
				 * and cannot be freed yet, so strand it:
				 * purge its queue and blackhole any
				 * further output until the last reference
				 * is dropped.  Keep NOARP for entries
				 * that were valid so cached routes still
				 * see a non-resolving state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
0397
/* Flush all of @dev's entries — including permanent ones — after its
 * hardware address has changed.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
0405
/* Device-down path: flush @dev's cache entries, proxy entries and any
 * queued proxy skbs.  Note that pneigh_ifdown_and_unlock() releases the
 * tbl->lock taken here.
 */
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);	/* drops tbl->lock */
	pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
	if (skb_queue_empty_lockless(&tbl->proxy_queue))
		del_timer_sync(&tbl->proxy_timer);
	return 0;
}
0417
/* Carrier loss on @dev: flush its entries but keep NUD_PERMANENT ones. */
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);
0424
/* Interface going down: flush all of @dev's entries, permanent included. */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
0431
/* Allocate and minimally initialize a neighbour entry for @tbl/@dev.
 * Unless @exempt_from_gc, the new entry is charged against gc_entries
 * and may trigger a synchronous forced gc first.  Returns NULL on
 * allocation failure or table overflow.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	/* tentatively charge the entry; undone in out_entries on failure */
	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	/* entry_size includes the protocol key; neigh_priv_len is extra
	 * per-device private room
	 */
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->flags = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead = 1;	/* not hashed yet; ___neigh_create() clears this */
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
0488
/* Fill in one hash salt.  The low bit is forced on so the value is odd
 * and never zero — presumably a requirement of the tbl->hash callbacks
 * that consume hash_rnd (TODO confirm against the per-protocol hashes).
 */
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
0493
/* Allocate a hash table with 1 << shift buckets plus its descriptor and
 * fresh hash salts.  Bucket arrays larger than a page come from the page
 * allocator and are registered with kmemleak by hand, since kmemleak
 * does not track raw page allocations.
 */
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
0522
/* RCU callback undoing neigh_hash_alloc(): frees the bucket array
 * (mirroring the kzalloc vs. __get_free_pages split) and then the
 * descriptor itself.
 */
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
0539
/* Replace tbl->nht with a table of 1 << new_shift buckets and rehash
 * every entry into it.  Caller holds tbl->lock; lockless readers are
 * RCU-protected, so the old table is only freed after a grace period.
 * On allocation failure the old table is kept and returned.
 */
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			/* new table has fresh salts, so rehash from scratch */
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			/* push onto the head of the new chain */
			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
0580
/* Look up the entry for @pkey on @dev and take a reference on it.
 * Returns NULL when not found or when the entry is concurrently dying
 * (refcount already zero).
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
0600
/* Device-independent lookup: find the entry whose key matches @pkey in
 * namespace @net, regardless of which device it is attached to, and
 * take a reference.  Returns NULL when not found or concurrently dying.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	/* hash with dev == NULL to stay device-independent */
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
0631
/* Allocate, construct and hash-insert a new entry for @pkey on @dev.
 * If another CPU inserted the same key while we were constructing, that
 * winner is returned and the new entry discarded.  Returns ERR_PTR() on
 * failure; with @want_ref the result carries a reference for the caller.
 */
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

	/* protocol-specific setup */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* device-specific setup */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* backdate confirmation so the fresh entry is not treated as
	 * recently confirmed
	 */
	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/* grow the hash once there are more entries than buckets */
	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* did somebody else insert the same key while we were unlocked? */
	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	/* publish at the chain head; rcu_assign_pointer orders the fully
	 * initialized entry before it becomes visible to lockless readers
	 */
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}
0727
/* Public entry creation wrapper: no extra flags, subject to gc. */
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
0734
0735 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
0736 {
0737 u32 hash_val = *(u32 *)(pkey + key_len - 4);
0738 hash_val ^= (hash_val >> 16);
0739 hash_val ^= hash_val >> 8;
0740 hash_val ^= hash_val >> 4;
0741 hash_val &= PNEIGH_HASHMASK;
0742 return hash_val;
0743 }
0744
0745 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
0746 struct net *net,
0747 const void *pkey,
0748 unsigned int key_len,
0749 struct net_device *dev)
0750 {
0751 while (n) {
0752 if (!memcmp(n->key, pkey, key_len) &&
0753 net_eq(pneigh_net(n), net) &&
0754 (n->dev == dev || !n->dev))
0755 return n;
0756 n = n->next;
0757 }
0758 return NULL;
0759 }
0760
/* Lockless-caller variant of proxy lookup: hashes @pkey and scans the
 * chain.  Callers provide their own synchronization against tbl->lock.
 */
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
0771
/* Look up a proxy entry for @pkey, optionally (@creat) creating it.
 * Creation runs under the RTNL, which is what prevents two creators
 * from racing between the unlocked lookup and the insert.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		netdev_put(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	/* insert at the head of the hash chain */
	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
0814
0815
/* Remove the proxy entry matching @pkey/@dev/@net exactly (no wildcard
 * device matching here).  Returns 0 on success, -ENOENT if absent.  The
 * destructor and the free run after dropping tbl->lock.
 */
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			netdev_put(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
0840
/* Unlink every proxy entry for @dev (all devices when @dev is NULL).
 * Called with tbl->lock write-held and — note — releases it before
 * running the destructors on the collected entries.
 */
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				/* collect onto a private free list */
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	/* destructors and frees run outside the table lock */
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		netdev_put(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}
0870
0871 static void neigh_parms_destroy(struct neigh_parms *parms);
0872
/* Drop a reference on @parms; destroys them on the final put. */
static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
0878
0879
0880
0881
0882
/* Final destructor, invoked when the last reference to @neigh is
 * dropped.  The entry must already be marked dead and unhashed; a live
 * entry here is a refcounting bug and is deliberately leaked with a
 * warning rather than freed.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	/* a pending timer would hold a reference — impossible here */
	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	netdev_put(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
0915
0916
0917
0918
0919
0920
/* Neighbour state became suspicious: disable the fast path by routing
 * transmissions through ops->output (instead of connected_output).
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}
0927
0928
0929
0930
0931
0932
/* Neighbour is verified reachable: enable the fast path by routing
 * transmissions through ops->connected_output.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
0939
/* Periodic garbage-collection work: re-randomizes reachable_time about
 * every 300 seconds, then walks the hash table freeing unreferenced
 * entries that are FAILED or unused for longer than GC_STALETIME.
 * Re-arms itself at BASE_REACHABLE_TIME/2.
 */
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/* periodically refresh every parms' randomized reachable_time so
	 * NUD timing does not stay correlated forever
	 */
	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	/* below gc_thresh1 the table is small enough to leave alone */
	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			/* permanent, in-timer and externally learned
			 * entries are never collected here
			 */
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/* Drop the lock between buckets to bound latency; nht is
		 * re-read afterwards since neigh_hash_grow() may have
		 * replaced the table in the meantime.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* re-arm; NOTE(review): the half BASE_REACHABLE_TIME period
	 * presumably tracks the RFC-style NUD timing — confirm upstream
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
1021
1022 static __inline__ int neigh_max_probes(struct neighbour *n)
1023 {
1024 struct neigh_parms *p = n->parms;
1025 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1026 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1027 NEIGH_VAR(p, MCAST_PROBES));
1028 }
1029
/* Bookkeeping for an entry that just entered NUD_FAILED: report every
 * queued skb as unreachable and flush the queue.  Called and returns
 * with neigh->lock held, but drops it around each error_report() call.
 */
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* error_report() can be a heavyweight path, so run it without
	 * the lock; stop early if somebody else moved the entry out of
	 * NUD_FAILED while it was dropped.
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
1054
/* Send one solicitation for @neigh, using a clone of the most recently
 * queued skb as a template (may be NULL).  Called with neigh->lock
 * held; drops it before soliciting.
 */
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);

	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
1068
1069
1070
/* Per-neighbour timer: drives the NUD state machine transitions
 * REACHABLE -> DELAY/STALE, DELAY -> REACHABLE/PROBE, and
 * INCOMPLETE/PROBE -> FAILED once the probe budget is exhausted.
 * Runs holding a reference taken by neigh_add_timer()/mod_timer.
 */
static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	/* timer may have been stopped/raced; nothing to do then */
	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			/* still confirmed recently enough; re-arm */
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* recently used but unconfirmed: delay-probe soon */
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			/* idle and unconfirmed: demote to STALE */
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* confirmation arrived during the delay window */
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			/* no confirmation: start active probing */
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE: retransmit after RETRANS_TIME */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		/* mod_timer() == 0 means the timer was inactive, so take
		 * a fresh reference for the newly armed timer
		 */
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);	/* drops neigh->lock */
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	/* drop the reference the expired timer held */
	neigh_release(neigh);
}
1159
/* Kick resolution for @neigh and, while it is unresolved, park @skb on
 * its arp_queue (bounded by QUEUE_LEN_BYTES, oldest dropped first).
 * Returns 0 when the caller may transmit immediately, 1 when the skb
 * was queued or dropped.  @immediate_ok allows sending the first probe
 * synchronously instead of deferring it to the timer.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* already resolved or being validated: caller can just send */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			/* begin resolution: unicast probes first */
			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			/* probing disabled: fail immediately */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* enforce the byte budget, evicting oldest first */
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	/* neigh_probe() drops neigh->lock itself */
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
1247
1248 static void neigh_update_hhs(struct neighbour *neigh)
1249 {
1250 struct hh_cache *hh;
1251 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1252 = NULL;
1253
1254 if (neigh->dev->header_ops)
1255 update = neigh->dev->header_ops->cache_update;
1256
1257 if (update) {
1258 hh = &neigh->hh;
1259 if (READ_ONCE(hh->hh_len)) {
1260 write_seqlock_bh(&hh->hh_lock);
1261 update(hh, neigh->dev, neigh->ha);
1262 write_sequnlock_bh(&hh->hh_lock);
1263 }
1264 }
1265 }
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1287 u8 new, u32 flags, u32 nlmsg_pid,
1288 struct netlink_ext_ack *extack)
1289 {
1290 bool gc_update = false, managed_update = false;
1291 int update_isrouter = 0;
1292 struct net_device *dev;
1293 int err, notify = 0;
1294 u8 old;
1295
1296 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1297
1298 write_lock_bh(&neigh->lock);
1299
1300 dev = neigh->dev;
1301 old = neigh->nud_state;
1302 err = -EPERM;
1303
1304 if (neigh->dead) {
1305 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1306 new = old;
1307 goto out;
1308 }
1309 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1310 (old & (NUD_NOARP | NUD_PERMANENT)))
1311 goto out;
1312
1313 neigh_update_flags(neigh, flags, ¬ify, &gc_update, &managed_update);
1314 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
1315 new = old & ~NUD_PERMANENT;
1316 neigh->nud_state = new;
1317 err = 0;
1318 goto out;
1319 }
1320
1321 if (!(new & NUD_VALID)) {
1322 neigh_del_timer(neigh);
1323 if (old & NUD_CONNECTED)
1324 neigh_suspect(neigh);
1325 neigh->nud_state = new;
1326 err = 0;
1327 notify = old & NUD_VALID;
1328 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1329 (new & NUD_FAILED)) {
1330 neigh_invalidate(neigh);
1331 notify = 1;
1332 }
1333 goto out;
1334 }
1335
1336
1337 if (!dev->addr_len) {
1338
1339 lladdr = neigh->ha;
1340 } else if (lladdr) {
1341
1342
1343
1344
1345
1346 if ((old & NUD_VALID) &&
1347 !memcmp(lladdr, neigh->ha, dev->addr_len))
1348 lladdr = neigh->ha;
1349 } else {
1350
1351
1352
1353 err = -EINVAL;
1354 if (!(old & NUD_VALID)) {
1355 NL_SET_ERR_MSG(extack, "No link layer address given");
1356 goto out;
1357 }
1358 lladdr = neigh->ha;
1359 }
1360
1361
1362
1363
1364 if (new & NUD_CONNECTED)
1365 neigh->confirmed = jiffies;
1366
1367
1368
1369
1370 err = 0;
1371 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1372 if (old & NUD_VALID) {
1373 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1374 update_isrouter = 0;
1375 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1376 (old & NUD_CONNECTED)) {
1377 lladdr = neigh->ha;
1378 new = NUD_STALE;
1379 } else
1380 goto out;
1381 } else {
1382 if (lladdr == neigh->ha && new == NUD_STALE &&
1383 !(flags & NEIGH_UPDATE_F_ADMIN))
1384 new = old;
1385 }
1386 }
1387
1388
1389
1390
1391
1392 if (new != old || lladdr != neigh->ha)
1393 neigh->updated = jiffies;
1394
1395 if (new != old) {
1396 neigh_del_timer(neigh);
1397 if (new & NUD_PROBE)
1398 atomic_set(&neigh->probes, 0);
1399 if (new & NUD_IN_TIMER)
1400 neigh_add_timer(neigh, (jiffies +
1401 ((new & NUD_REACHABLE) ?
1402 neigh->parms->reachable_time :
1403 0)));
1404 neigh->nud_state = new;
1405 notify = 1;
1406 }
1407
1408 if (lladdr != neigh->ha) {
1409 write_seqlock(&neigh->ha_lock);
1410 memcpy(&neigh->ha, lladdr, dev->addr_len);
1411 write_sequnlock(&neigh->ha_lock);
1412 neigh_update_hhs(neigh);
1413 if (!(new & NUD_CONNECTED))
1414 neigh->confirmed = jiffies -
1415 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1416 notify = 1;
1417 }
1418 if (new == old)
1419 goto out;
1420 if (new & NUD_CONNECTED)
1421 neigh_connect(neigh);
1422 else
1423 neigh_suspect(neigh);
1424 if (!(old & NUD_VALID)) {
1425 struct sk_buff *skb;
1426
1427
1428
1429 while (neigh->nud_state & NUD_VALID &&
1430 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1431 struct dst_entry *dst = skb_dst(skb);
1432 struct neighbour *n2, *n1 = neigh;
1433 write_unlock_bh(&neigh->lock);
1434
1435 rcu_read_lock();
1436
1437
1438
1439
1440
1441
1442
1443
1444 n2 = NULL;
1445 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1446 n2 = dst_neigh_lookup_skb(dst, skb);
1447 if (n2)
1448 n1 = n2;
1449 }
1450 n1->output(n1, skb);
1451 if (n2)
1452 neigh_release(n2);
1453 rcu_read_unlock();
1454
1455 write_lock_bh(&neigh->lock);
1456 }
1457 __skb_queue_purge(&neigh->arp_queue);
1458 neigh->arp_queue_len_bytes = 0;
1459 }
1460 out:
1461 if (update_isrouter)
1462 neigh_update_is_router(neigh, flags, ¬ify);
1463 write_unlock_bh(&neigh->lock);
1464 if (((new ^ old) & NUD_PERMANENT) || gc_update)
1465 neigh_update_gc_list(neigh);
1466 if (managed_update)
1467 neigh_update_managed_list(neigh);
1468 if (notify)
1469 neigh_update_notify(neigh, nlmsg_pid);
1470 trace_neigh_update_done(neigh, err);
1471 return err;
1472 }
1473
/* Public wrapper around __neigh_update() without extack reporting. */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
1480
1481
1482
1483
/* Move a NUD_FAILED entry back to NUD_INCOMPLETE with the probe counter
 * preset to the maximum, and arm the retransmit timer.
 * Note: "updated" is refreshed even when the entry is not FAILED.
 * NOTE(review): appears to require neigh->lock held by the caller
 * (mutates nud_state without taking it) — confirm at call sites.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
1498
1499 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1500 u8 *lladdr, void *saddr,
1501 struct net_device *dev)
1502 {
1503 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1504 lladdr || !dev->addr_len);
1505 if (neigh)
1506 neigh_update(neigh, lladdr, NUD_STALE,
1507 NEIGH_UPDATE_F_OVERRIDE, 0);
1508 return neigh;
1509 }
1510 EXPORT_SYMBOL(neigh_event_ns);
1511
1512
/* Fill the neighbour's cached hardware header via the device's
 * header_ops->cache() callback, if it has not been filled yet.
 */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Re-check hh_len under the lock so only one caller initializes
	 * the cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
1529
1530
1531
/* Output path for a neighbour that may still need resolution.
 * If neigh_event_send() queued or dropped the skb, returns 0; otherwise
 * builds the hardware header (retrying under the ha_lock seqlock if the
 * address changes mid-build) and hands the packet to the device queue.
 * Returns the dev_queue_xmit() result or -EINVAL on header failure.
 */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		/* Populate the cached header once, if supported. */
		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
1564
1565
1566
1567 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1568 {
1569 struct net_device *dev = neigh->dev;
1570 unsigned int seq;
1571 int err;
1572
1573 do {
1574 __skb_pull(skb, skb_network_offset(skb));
1575 seq = read_seqbegin(&neigh->ha_lock);
1576 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1577 neigh->ha, NULL, skb->len);
1578 } while (read_seqretry(&neigh->ha_lock, seq));
1579
1580 if (err >= 0)
1581 err = dev_queue_xmit(skb);
1582 else {
1583 err = -EINVAL;
1584 kfree_skb(skb);
1585 }
1586 return err;
1587 }
1588 EXPORT_SYMBOL(neigh_connected_output);
1589
/* Output helper that needs no link-layer header: queue directly. */
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
1595
/* Periodic worker: re-probe every entry on the table's managed list,
 * then re-arm itself after INTERVAL_PROBE_TIME_MS.
 */
static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
	write_unlock_bh(&tbl->lock);
}
1609
/* Proxy-queue timer: release every queued skb whose scheduled time has
 * arrived (via tbl->proxy_redo if the device is up, else drop it), and
 * re-arm the timer for the earliest remaining entry.
 */
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		/* <= 0 means this skb's delay has expired. */
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;
			struct in_device *in_dev;

			/* Keep the per-device proxy qlen accounting in sync. */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			if (in_dev)
				in_dev->arp_parms->qlen--;
			rcu_read_unlock();
			__skb_unlink(skb, &tbl->proxy_queue);

			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1650
/* Queue @skb on the table's proxy queue with a random delay of up to
 * PROXY_DELAY, dropping it if the per-parms queue is already over
 * PROXY_QLEN.  Arms the proxy timer for the earliest pending release.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* If the timer was pending earlier than our slot, keep its time. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	p->qlen++;
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
1678
/* Find the neigh_parms for (@net, @ifindex) on @tbl's parms list.
 * ifindex == 0 selects the table's default parms (init_net only).
 * Returns NULL when no match exists.
 */
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
1692
1693 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1694 struct neigh_table *tbl)
1695 {
1696 struct neigh_parms *p;
1697 struct net *net = dev_net(dev);
1698 const struct net_device_ops *ops = dev->netdev_ops;
1699
1700 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1701 if (p) {
1702 p->tbl = tbl;
1703 refcount_set(&p->refcnt, 1);
1704 p->reachable_time =
1705 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1706 p->qlen = 0;
1707 netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
1708 p->dev = dev;
1709 write_pnet(&p->net, net);
1710 p->sysctl_table = NULL;
1711
1712 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1713 netdev_put(dev, &p->dev_tracker);
1714 kfree(p);
1715 return NULL;
1716 }
1717
1718 write_lock_bh(&tbl->lock);
1719 list_add(&p->list, &tbl->parms.list);
1720 write_unlock_bh(&tbl->lock);
1721
1722 neigh_parms_data_state_cleanall(p);
1723 }
1724 return p;
1725 }
1726 EXPORT_SYMBOL(neigh_parms_alloc);
1727
/* RCU callback: drop the reference held by the table once no reader
 * can still see the parms on the list.
 */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1735
/* Unlink @parms from @tbl and schedule its reference drop after a
 * grace period.  The table's own default parms are never released.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	netdev_put(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
1748
/* Final destructor once the last reference to @parms is gone. */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1753
/* Lockdep class for per-table proxy queues. */
static struct lock_class_key neigh_table_proxy_queue_class;

/* Registered tables, indexed by NEIGH_*_TABLE. */
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1757
/* One-time initialization of a neighbour table: default parms, stats,
 * hash tables, GC/managed workers, proxy timer and proc entry.
 * Panics on allocation failure (boot-time path).
 */
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
	tbl->parms.qlen = 0;

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Initial hash: 2^3 buckets; grown dynamically later. */
	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
1816
/* Tear down a neighbour table: unregister it, stop all deferred work
 * and timers, flush queues and entries, then free hashes and stats.
 * Always returns 0.
 */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	/* Unpublish first so no new lookups find the table. */
	neigh_tables[index] = NULL;

	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue, NULL);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
1844
1845 static struct neigh_table *neigh_find_table(int family)
1846 {
1847 struct neigh_table *tbl = NULL;
1848
1849 switch (family) {
1850 case AF_INET:
1851 tbl = neigh_tables[NEIGH_ARP_TABLE];
1852 break;
1853 case AF_INET6:
1854 tbl = neigh_tables[NEIGH_ND_TABLE];
1855 break;
1856 case AF_DECnet:
1857 tbl = neigh_tables[NEIGH_DN_TABLE];
1858 break;
1859 }
1860
1861 return tbl;
1862 }
1863
/* Netlink attribute policy shared by the RTM_*NEIGH handlers. */
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1880
/* RTM_DELNEIGH handler: remove a (proxy) neighbour entry identified by
 * family, NDA_DST and optional ifindex.  Regular entries are forced to
 * NUD_FAILED via __neigh_update() and then unlinked from the table.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	/* Proxy entries are handled by the pneigh machinery. */
	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	/* Drop our lookup reference and unlink under the table lock;
	 * the hash table still holds its own reference at this point.
	 */
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
1945
/* RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry
 * from a netlink request.  Honours NLM_F_CREATE/EXCL/REPLACE and the
 * NTF_* flags (including the NDA_FLAGS_EXT extension word).
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		/* Extended flags must fit above the legacy 8-bit field
		 * inside neigh->flags.
		 */
		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
			      hweight32(NTF_EXT_MASK)));
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
	/* Proxy entries: handled entirely by the pneigh table. */
	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
		bool exempt_from_gc = ndm_permanent ||
				      ndm_flags & NTF_EXT_LEARNED;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}
		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
			err = -EINVAL;
			goto out;
		}

		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		/* Without NLM_F_REPLACE, do not override existing state. */
		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
2097
/* Emit a nested NDTA_PARMS attribute describing @parms.
 * Returns the nest size on success, -ENOBUFS/-EMSGSIZE on failure.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* Approximate packet count derived from the byte limit, kept
	     * for the deprecated NDTPA_QUEUE_LEN attribute.
	     */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
2147
/* Build a full RTM_*NEIGHTBL message for @tbl: name, GC thresholds,
 * runtime config snapshot (NDTA_CONFIG), aggregated per-CPU stats
 * (NDTA_STATS) and the default parms.  Holds tbl->lock (read) across
 * the fill.  Returns 0 or -EMSGSIZE.
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		/* Snapshot of the table's current configuration/state. */
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		/* Sum the per-CPU statistics into one ndt_stats blob. */
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2236
/* Build a minimal RTM_*NEIGHTBL message carrying only the table name
 * and one specific parms nest (used for per-device parms notifications).
 * Returns 0 or -EMSGSIZE.
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2269
/* Policy for top-level RTM_SETNEIGHTBL attributes. */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
2278
/* Policy for the nested NDTA_PARMS attributes. */
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
};
2296
/* RTM_SETNEIGHTBL handler: update table-wide GC settings and/or one
 * parms set (selected by NDTPA_IFINDEX) on the table named NDTA_NAME.
 * GC thresholds/interval may only be changed from init_net.
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	/* Locate the table by (optional) family and name. */
	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/* All mutations below happen under the table write lock. */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				/* Legacy packet count: convert to bytes. */
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* Re-randomize reachable_time immediately so
				 * the new base takes effect without waiting
				 * for the next periodic refresh.
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_INTERVAL_PROBE_TIME_MS:
				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	/* Table-wide GC settings are restricted to init_net. */
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
2462
/* Validate the header of a strict RTM_GETNEIGHTBL dump request: the message
 * must carry a full struct ndtmsg, the padding fields must be zero, and no
 * attributes may follow the header.  Returns 0 if valid, -EINVAL (with an
 * extack message) otherwise.
 */
static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	/* a table dump request carries no attributes at all */
	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}
2486
/* RTM_GETNEIGHTBL dump handler: emit one message per neighbour table that
 * matches the requested family, followed by one message per per-device parms
 * entry belonging to the caller's netns.  Resume state is kept in
 * cb->args[0] (table index) and cb->args[1] (parms index within the table).
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		/* start past tbl->parms: the table's default parms were
		 * already covered by neightbl_fill_info() above
		 */
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		/* the skip count only applies to the table we resumed in */
		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2547
/* Fill one neighbour-entry netlink message (RTM_NEWNEIGH/RTM_DELNEIGH) for
 * @neigh into @skb.  Returns 0 on success or -EMSGSIZE if the skb has no
 * room, in which case the partially-built message is cancelled.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* split the flags word: high bits go to NDA_FLAGS_EXT, the
	 * legacy low bits fit in the u8 ndm_flags field
	 */
	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh_flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	/* take a consistent snapshot of state, lladdr and timestamps */
	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* optional attributes: only emitted when non-zero */
	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2609
/* Fill one proxy-neighbour netlink message for @pn into @skb.  Proxy entries
 * have no resolution state, so ndm_state is NUD_NONE and NTF_PROXY is forced
 * into ndm_flags.  Returns 0 or -EMSGSIZE (message cancelled).
 */
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	u32 neigh_flags, neigh_flags_ext;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* split the flags word as in neigh_fill_info() */
	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
	neigh_flags = pn->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	/* pn->dev may be NULL for device-independent proxy entries */
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2649
/* Announce a neighbour update: run the in-kernel netevent notifier chain,
 * then send an RTM_NEWNEIGH message to userspace listeners.
 */
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
2655
2656 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2657 {
2658 struct net_device *master;
2659
2660 if (!master_idx)
2661 return false;
2662
2663 master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2664
2665
2666
2667
2668 if (master_idx == -1)
2669 return !!master;
2670
2671 if (!master || master->ifindex != master_idx)
2672 return true;
2673
2674 return false;
2675 }
2676
2677 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2678 {
2679 if (filter_idx && (!dev || dev->ifindex != filter_idx))
2680 return true;
2681
2682 return false;
2683 }
2684
/* Per-request filters parsed from an RTM_GETNEIGH dump (0 means unset). */
struct neigh_dump_filter {
	int master_idx;	/* NDA_MASTER; see neigh_master_filtered() for -1 */
	int dev_idx;	/* NDA_IFINDEX; restrict dump to this device */
};
2689
/* Dump one neighbour table's entries into @skb, applying @filter and only
 * visiting entries from the caller's netns.  The hash table is walked under
 * rcu_read_lock_bh().  Resume state: cb->args[1] = bucket, cb->args[2] =
 * index within the bucket.  Returns skb->len, or -1 when the skb filled up.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		/* only the resumed bucket keeps its entry offset */
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2736
/* Dump one table's proxy-neighbour entries into @skb, applying @filter and
 * restricting to the caller's netns.  The proxy hash is protected by
 * tbl->lock (read side).  Resume state: cb->args[3] = bucket, cb->args[4] =
 * index within the bucket.  Returns skb->len, or -1 when the skb filled up.
 */
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		/* only the resumed bucket keeps its entry offset */
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;

}
2781
/* Validate an RTM_GETNEIGH dump request and extract the NDA_IFINDEX /
 * NDA_MASTER filters into @filter.  Under strict checking the header fields
 * other than family and flags must be zero, only NTF_PROXY is accepted in
 * ndm_flags, and any attribute other than the two filters is rejected.
 */
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}
2842
/* RTM_GETNEIGH dump handler: walk every neighbour table matching the
 * requested family and dump either its regular entries or, when NTF_PROXY
 * was set in the header, its proxy entries.  cb->args[0] tracks the table
 * index across invocations.
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence; the family member
	 * sits at the same offset in both rtgenmsg and ndmsg
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	/* non-strict validation errors are deliberately ignored for
	 * backward compatibility with older userspace
	 */
	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* moving to a new table: reset the per-table resume state */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
2888
/* Validate an RTM_GETNEIGH get (non-dump) request and extract the lookup
 * parameters: table (from ndm_family), destination address (NDA_DST),
 * ifindex and flags.  Only NTF_PROXY is accepted in ndm_flags; NDA_DST is
 * the only accepted attribute and must match the table's key length.
 */
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			/* the address length is family-specific */
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
2948
/* Worst-case payload size of one neigh_fill_info() message, used to size
 * the notification skb.  One term per attribute that function may emit.
 */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo)) /* NDA_CACHEINFO */
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2959
2960 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2961 u32 pid, u32 seq)
2962 {
2963 struct sk_buff *skb;
2964 int err = 0;
2965
2966 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2967 if (!skb)
2968 return -ENOBUFS;
2969
2970 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2971 if (err) {
2972 kfree_skb(skb);
2973 goto errout;
2974 }
2975
2976 err = rtnl_unicast(skb, net, pid);
2977 errout:
2978 return err;
2979 }
2980
/* Worst-case payload size of one pneigh_fill_info() message.  One term per
 * attribute that function may emit.
 */
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2988
2989 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2990 u32 pid, u32 seq, struct neigh_table *tbl)
2991 {
2992 struct sk_buff *skb;
2993 int err = 0;
2994
2995 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2996 if (!skb)
2997 return -ENOBUFS;
2998
2999 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3000 if (err) {
3001 kfree_skb(skb);
3002 goto errout;
3003 }
3004
3005 err = rtnl_unicast(skb, net, pid);
3006 errout:
3007 return err;
3008 }
3009
/* RTM_GETNEIGH (non-dump) handler: look up one neighbour entry — or, when
 * NTF_PROXY is set, one proxy entry — and unicast it back to the requester.
 * A destination address is always required; a device is required for
 * regular entries but optional for proxy entries.
 */
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		/* creat == 0: lookup only, never create */
		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	/* neigh_lookup() takes a reference; dropped below */
	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
3070
/* Invoke @cb(neighbour, @cookie) for every entry in @tbl.  The walk holds
 * rcu_read_lock_bh() and the table's read lock, so @cb must not sleep or
 * take locks that would deadlock against tbl->lock.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
3092
3093
/* Walk every entry of @tbl and unlink+release those for which @cb returns
 * non-zero.  The caller must hold tbl->lock as writer (enforced via
 * lockdep_is_held() on the rcu_dereference_protected() calls).
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* unlink n; np stays put so the loop
				 * re-examines the replacement entry
				 */
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			/* notify and drop the ref after n->lock is released */
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
3127
/* Transmit @skb to @addr via the neighbour table selected by @index
 * (NEIGH_ARP_TABLE etc.), creating the neighbour entry on demand, or via a
 * plain link-layer header for NEIGH_LINK_TABLE.  On any error path the skb
 * is consumed (freed).  Returns the output function's result or a negative
 * errno (-EAFNOSUPPORT when @index names no usable table).
 */
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			/* IPv4 fast path: key is a raw 32-bit address */
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		/* no neighbour resolution: build the hard header directly */
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
3171
3172 #ifdef CONFIG_PROC_FS
3173
/* seq_file helper: return the first neighbour of the current table that is
 * visible to this netns and passes the state/sub-iterator filters.  Updates
 * state->bucket to the bucket the entry was found in (or one past the end).
 * Caller holds the locks taken by neigh_seq_start().
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* sub-iterator decides entry visibility */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* skip entries that are NOARP-only */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
3212
/* seq_file helper: advance from @n to the next visible neighbour, walking
 * into later buckets as needed.  When @pos is non-NULL it is decremented
 * once per entry returned (used by neigh_get_idx() to seek).  Caller holds
 * the locks taken by neigh_seq_start().
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		/* let the sub-iterator step within the current entry first */
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
3260
3261 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3262 {
3263 struct neighbour *n = neigh_get_first(seq);
3264
3265 if (n) {
3266 --(*pos);
3267 while (*pos) {
3268 n = neigh_get_next(seq, n, pos);
3269 if (!n)
3270 break;
3271 }
3272 }
3273 return *pos ? NULL : n;
3274 }
3275
3276 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3277 {
3278 struct neigh_seq_state *state = seq->private;
3279 struct net *net = seq_file_net(seq);
3280 struct neigh_table *tbl = state->tbl;
3281 struct pneigh_entry *pn = NULL;
3282 int bucket;
3283
3284 state->flags |= NEIGH_SEQ_IS_PNEIGH;
3285 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3286 pn = tbl->phash_buckets[bucket];
3287 while (pn && !net_eq(pneigh_net(pn), net))
3288 pn = pn->next;
3289 if (pn)
3290 break;
3291 }
3292 state->bucket = bucket;
3293
3294 return pn;
3295 }
3296
/* seq_file helper: advance from @pn to the next proxy entry in this netns,
 * moving on to later buckets as needed.  When @pos is non-NULL it is
 * decremented once per entry returned.
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	/* step within the current bucket first */
	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
3324
3325 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3326 {
3327 struct pneigh_entry *pn = pneigh_get_first(seq);
3328
3329 if (pn) {
3330 --(*pos);
3331 while (*pos) {
3332 pn = pneigh_get_next(seq, pn, pos);
3333 if (!pn)
3334 break;
3335 }
3336 }
3337 return *pos ? NULL : pn;
3338 }
3339
3340 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3341 {
3342 struct neigh_seq_state *state = seq->private;
3343 void *rc;
3344 loff_t idxpos = *pos;
3345
3346 rc = neigh_get_idx(seq, &idxpos);
3347 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3348 rc = pneigh_get_idx(seq, &idxpos);
3349
3350 return rc;
3351 }
3352
/* Begin a /proc seq_file walk over @tbl.  Takes rcu_read_lock_bh() and the
 * table's read lock; both are released by neigh_seq_stop().  Returns
 * SEQ_START_TOKEN for position 0 (header line) or the entry at *pos.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
3370
/* Advance the seq_file walk: after the header token comes the first regular
 * entry; after the last regular entry the walk continues into the proxy
 * entries (unless NEIGH_SEQ_NEIGH_ONLY).  NEIGH_SEQ_IS_PNEIGH in
 * state->flags records which phase we are in.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* already in the proxy phase; NEIGH_ONLY can't get here */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
3397
/* End the seq_file walk: drop the locks taken by neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
3409
3410
3411
/* /proc stats iterator: position 0 is the header line; positions >= 1 map
 * to CPU (*pos - 1), skipping CPUs that are not possible.  Returns the
 * per-cpu stats pointer for the next possible CPU, or NULL at the end.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		/* store cpu+1 so the next start/next call resumes after us */
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
3428
/* /proc stats iterator: advance to the next possible CPU's stats.  *pos is
 * left one past the CPU returned (or incremented once at the end so the
 * seq_file core sees forward progress).
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}
3443
/* Nothing to release: the stats walk takes no locks or references. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
3448
/* Emit one /proc stats row: the header for SEQ_START_TOKEN, otherwise one
 * line per possible CPU.  Note the first column (entries) is the table-wide
 * total and is therefore repeated identically on every CPU row.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
			"%08lx         %08lx         %08lx         "
			"%08lx       %08lx            %08lx\n",
		   atomic_read(&tbl->entries),
		   /* entry lifecycle */
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   /* cache lookups */
		   st->lookups,
		   st->hits,
		   /* failed resolutions */
		   st->res_failed,
		   /* received probes */
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   /* garbage collection activity */
		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
3484
/* seq_file operations backing the per-table /proc stats file. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
3491 #endif
3492
/* Broadcast a neighbour netlink message of @type for @n to the
 * RTNLGRP_NEIGH multicast group.  On allocation or fill failure the error
 * is recorded on the rtnetlink socket instead of being returned.
 */
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
3517
/* Ask userspace to resolve @n by multicasting an RTM_GETNEIGH request
 * (presumably consumed by a userspace resolution daemon — the kernel only
 * emits the message here).
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
3523
3524 #ifdef CONFIG_SYSCTL
/* Upper bound for the packet-based unres_qlen sysctl, chosen so that the
 * byte-based value (packets * SKB_TRUESIZE(ETH_FRAME_LEN)) computed in
 * proc_unres_qlen() cannot overflow an int.
 */
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3526
3527 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3528 void *buffer, size_t *lenp, loff_t *ppos)
3529 {
3530 int size, ret;
3531 struct ctl_table tmp = *ctl;
3532
3533 tmp.extra1 = SYSCTL_ZERO;
3534 tmp.extra2 = &unres_qlen_max;
3535 tmp.data = &size;
3536
3537 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3538 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3539
3540 if (write && !ret)
3541 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3542 return ret;
3543 }
3544
3545 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3546 int family)
3547 {
3548 switch (family) {
3549 case AF_INET:
3550 return __in_dev_arp_parms_get_rcu(dev);
3551 case AF_INET6:
3552 return __in6_dev_nd_parms_get_rcu(dev);
3553 }
3554 return NULL;
3555 }
3556
/* Propagate a newly-written default parms value @p->data[index] to every
 * device in @net whose own parms entry has not been explicitly set (its
 * data_state bit for @index is clear).
 */
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		/* skip devices that overrode this setting themselves */
		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
3573
/* Post-write bookkeeping shared by all neigh sysctl handlers: mark the
 * variable as explicitly set, fire the netevent for DELAY_PROBE_TIME
 * changes, and — for the default (non-device) table — copy the new value
 * to devices that have not overridden it.  No-op on reads.
 */
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	/* recover the NEIGH_VAR index from the data pointer's offset */
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
3590
3591 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3592 void *buffer, size_t *lenp,
3593 loff_t *ppos)
3594 {
3595 struct ctl_table tmp = *ctl;
3596 int ret;
3597
3598 tmp.extra1 = SYSCTL_ZERO;
3599 tmp.extra2 = SYSCTL_INT_MAX;
3600
3601 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3602 neigh_proc_update(ctl, write);
3603 return ret;
3604 }
3605
/* Sysctl handler: millisecond value stored as jiffies, with a lower bound
 * of 1 ms (so the stored jiffies value can never be zero), then the common
 * neigh post-write processing.  @min lives on the stack, which is fine
 * because proc_dointvec_ms_jiffies_minmax() only reads extra1 during the
 * call.
 */
static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
						   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	int min = msecs_to_jiffies(1);

	tmp.extra1 = &min;
	tmp.extra2 = NULL;

	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}
3621
3622 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3623 size_t *lenp, loff_t *ppos)
3624 {
3625 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3626
3627 neigh_proc_update(ctl, write);
3628 return ret;
3629 }
3630 EXPORT_SYMBOL(neigh_proc_dointvec);
3631
3632 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3633 size_t *lenp, loff_t *ppos)
3634 {
3635 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3636
3637 neigh_proc_update(ctl, write);
3638 return ret;
3639 }
3640 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3641
3642 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3643 void *buffer, size_t *lenp,
3644 loff_t *ppos)
3645 {
3646 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3647
3648 neigh_proc_update(ctl, write);
3649 return ret;
3650 }
3651
3652 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3653 void *buffer, size_t *lenp, loff_t *ppos)
3654 {
3655 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3656
3657 neigh_proc_update(ctl, write);
3658 return ret;
3659 }
3660 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3661
3662 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3663 void *buffer, size_t *lenp,
3664 loff_t *ppos)
3665 {
3666 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3667
3668 neigh_proc_update(ctl, write);
3669 return ret;
3670 }
3671
/* Handler for base_reachable_time(_ms): dispatch on the procname to the
 * matching jiffies conversion, then refresh the randomized reachable_time
 * so the new base takes effect immediately.
 */
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
3696
/* Encode the offset of data[index] within struct neigh_parms as a fake
 * pointer (offsetof-style null-base trick).  neigh_sysctl_register()
 * later rebases it by adding the address of the real neigh_parms.
 */
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

/* Build one ctl_table entry for NEIGH_VAR_<attr>.  data_attr may differ
 * from attr when two sysctl names share the same backing variable (e.g.
 * retrans_time and retrans_time_ms).
 */
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

/* Integer clamped to [0, INT_MAX]. */
#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

/* Value stored in jiffies, exposed in seconds. */
#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

/* Value stored in jiffies, exposed in USER_HZ ticks. */
#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

/* Value stored in jiffies, exposed in milliseconds, minimum 1 ms. */
#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)

/* Millisecond view of another attribute's backing variable. */
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

/* Legacy packet-count view of the byte-denominated queue length. */
#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3726
/* Template sysctl table, kmemdup'd per neigh_parms in
 * neigh_sysctl_register().  Entries below NEIGH_VAR_GC_INTERVAL carry
 * offset-encoded .data pointers (NEIGH_PARMS_DATA_OFFSET) that are
 * rebased onto the concrete neigh_parms; the gc_* entries are only kept
 * for the per-table "default" instance and zeroed out for devices.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
						       "interval_probe_time_ms"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		/* Aliases sharing another entry's backing variable: */
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		/* Per-table (not per-device) garbage-collection knobs; .data
		 * is filled in at registration time from the neigh_table.
		 */
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
3783
/* Register the neigh sysctl tree for one neigh_parms under
 * net/<ipv4|ipv6>/neigh/<devname|default>.  @dev is NULL for the
 * per-table default parms; @handler, if given, overrides the
 * retrans/reachable time handlers (used by protocols with their own
 * conversion rules).  Returns 0 or -ENOBUFS.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	/* Private copy of the template so per-device tables are independent. */
	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		/* Rebase the offset-encoded .data onto this neigh_parms. */
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early: per-device trees do not expose
		 * the gc_* knobs, which belong to the table as a whole.
		 */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
	      p_name = "ipv4";
	      break;
	case AF_INET6:
	      p_name = "ipv6";
	      break;
	default:
	      BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
3868
3869 void neigh_sysctl_unregister(struct neigh_parms *p)
3870 {
3871 if (p->sysctl_table) {
3872 struct neigh_sysctl_table *t = p->sysctl_table;
3873 p->sysctl_table = NULL;
3874 unregister_net_sysctl_table(t->sysctl_header);
3875 kfree(t);
3876 }
3877 }
3878 EXPORT_SYMBOL(neigh_sysctl_unregister);
3879
3880 #endif
3881
/* Wire up the rtnetlink message handlers for neighbour entries and
 * neighbour tables; runs once at boot via subsys_initcall.
 */
static int __init neigh_init(void)
{
	/* Per-neighbour operations: add, delete, get/dump. */
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	/* Table-level operations: dump parameters, set parameters. */
	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}
3894
3895 subsys_initcall(neigh_init);