// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Fixed:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	40

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

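/* skbs handed to rtnl_kfree_skbs() are queued on this list and freed in
 * bulk by __rtnl_unlock() after the mutex has been dropped; the list is
 * only manipulated with rtnl_mutex held.
 */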
static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* __rtnl_unlock() must not be used on paths that queue net_todo
	 * work: only rtnl_unlock() runs netdev_run_todo(), so anything
	 * left on net_todo_list here would never be processed.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	/* Free the skbs deferred via rtnl_kfree_skbs() now that the mutex
	 * is no longer held; cond_resched() keeps long lists preemptible.
	 */
	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif

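/* Message handlers live in a two-level table: the outer array is indexed
 * by protocol family, the inner RCU-managed table by rtnetlink message
 * type. PF_UNSPEC entries serve as fallbacks for unregistered families.
 */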
static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);
	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rtnl_dereference(tab[msgindex]);
	RCU_INIT_POINTER(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rtnl_dereference(tab[msgindex]);
		if (!link)
			continue;

		RCU_INIT_POINTER(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

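/* List of registered struct rtnl_link_ops, protected by RTNL. */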
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			if (atomic_read(&net->dev_unreg_count) > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

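/* Per-address-family ops; walked under RCU, updated with RTNL held. */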
static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the
	 * reference
	 */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = atomic_read(&dst->__refcnt);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

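/* Apply an RFC 2863 operstate transition, honouring the dormant and
 * testing flags, and notify listeners if the state actually changed.
 */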
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}

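/* Fold the 64-bit counters into the legacy 32-bit stats structure;
 * values wider than 32 bits are silently truncated.
 */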
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_DROPPED */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_REQUEST */
		+ nla_total_size(2);			/* PORT_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4) +	/* XDP_PROG_ID (or 1st mode) */
			  nla_total_size(4);	/* XDP_<mode>_PROG_ID */

	return xdp_size;
}

static size_t rtnl_prop_list_size(const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	size_t size;

	if (list_empty(&dev->name_node->list))
		return 0;
	size = nla_total_size(0);
	list_for_each_entry(name_node, &dev->name_node->list, list)
		size += nla_total_size(ALTIFNAMSIZ);
	return size;
}

static size_t rtnl_proto_down_size(const struct net_device *dev)
{
	size_t size = nla_total_size(1);

	if (dev->proto_down_reason)
		size += nla_total_size(0) + nla_total_size(4);

	return size;
}

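/* Estimate the attribute payload of an RTM_NEWLINK message for @dev,
 * used to size the notification skb before filling it.
 */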
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap)) /* IFLA_MAP */
	       + nla_total_size(sizeof(struct rtnl_link_stats)) /* IFLA_STATS */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64)) /* IFLA_STATS64 */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
	       + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(4)  /* IFLA_NEW_NETNSID */
	       + nla_total_size(4)  /* IFLA_NEW_IFINDEX */
	       + rtnl_proto_down_size(dev)  /* proto down + reason */
	       + nla_total_size(4)  /* IFLA_TARGET_NETNSID */
	       + nla_total_size(4)  /* IFLA_CARRIER_UP_COUNT */
	       + nla_total_size(4)  /* IFLA_CARRIER_DOWN_COUNT */
	       + nla_total_size(4)  /* IFLA_MIN_MTU */
	       + nla_total_size(4)  /* IFLA_MAX_MTU */
	       + rtnl_prop_list_size(dev)
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
	       + 0;
}

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_phys_item_id ppid = { };
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_broadcast vf_broadcast;
	struct ifla_vf_info ivi;
	struct ifla_vf_guid node_guid;
	struct ifla_vf_guid port_guid;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
	memset(&node_guid, 0, sizeof(node_guid));
	memset(&port_guid, 0, sizeof(port_guid));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf =
		node_guid.vf =
		port_guid.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;

	if (dev->netdev_ops->ndo_get_vf_guid &&
	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
					      &port_guid)) {
		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
			    &node_guid) ||
		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
			    &port_guid))
			goto nla_put_vf_failure;
	}
	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}

static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
					   struct net_device *dev,
					   u32 ext_filter_mask)
{
	struct nlattr *vfinfo;
	int i, num_vfs;

	if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
		return 0;

	num_vfs = dev_num_vf(dev->dev.parent);
	if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
		return -EMSGSIZE;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return 0;

	vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
	if (!vfinfo)
		return -EMSGSIZE;

	for (i = 0; i < num_vfs; i++) {
		if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
			return -EMSGSIZE;
	}

	nla_nest_end(skb, vfinfo);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}

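/* Helpers returning the id of the XDP program attached in a given mode
 * (generic/skb, driver, hardware), or 0 if none is attached.
 */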
static u32 rtnl_xdp_prog_skb(struct net_device *dev)
{
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (!generic_xdp_prog)
		return 0;
	return generic_xdp_prog->aux->id;
}

static u32 rtnl_xdp_prog_drv(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_DRV);
}

static u32 rtnl_xdp_prog_hw(struct net_device *dev)
{
	return dev_xdp_prog_id(dev, XDP_MODE_HW);
}

static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
			       u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
			       u32 (*get_prog_id)(struct net_device *dev))
{
	u32 curr_id;
	int err;

	curr_id = get_prog_id(dev);
	if (!curr_id)
		return 0;

	*prog_id = curr_id;
	err = nla_put_u32(skb, attr, curr_id);
	if (err)
		return err;

	if (*mode != XDP_ATTACHED_NONE)
		*mode = XDP_ATTACHED_MULTI;
	else
		*mode = tgt_mode;

	return 0;
}

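/* Emit the IFLA_XDP nest: one IFLA_XDP_<mode>_PROG_ID attribute per
 * attached program, IFLA_XDP_ATTACHED summarising the mode(s), and
 * IFLA_XDP_PROG_ID when exactly one program is attached.
 */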
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;
	u8 mode;

	xdp = nla_nest_start_noflag(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	prog_id = 0;
	mode = XDP_ATTACHED_NONE;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
				  IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
				  IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
	if (err)
		goto err_cancel;
	err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
				  IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
	if (err)
		goto err_cancel;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
	if (err)
		goto err_cancel;

	if (prog_id && mode != XDP_ATTACHED_MULTI) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}

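/* Map a netdev notifier event to the IFLA_EVENT value exposed to
 * userspace; events without an rtnetlink equivalent map to
 * IFLA_EVENT_NONE.
 */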
static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}

static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
{
	const struct net_device *upper_dev;
	int ret = 0;

	rcu_read_lock();

	upper_dev = netdev_master_upper_dev_get_rcu(dev);
	if (upper_dev)
		ret = nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex);

	rcu_read_unlock();
	return ret;
}

static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
			  bool force)
{
	int ifindex = dev_get_iflink(dev);

	if (force || dev->ifindex != ifindex)
		return nla_put_u32(skb, IFLA_LINK, ifindex);

	return 0;
}

static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
					      struct net_device *dev)
{
	char buf[IFALIASZ];
	int ret;

	ret = dev_get_alias(dev, buf, sizeof(buf));
	return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
}

static int rtnl_fill_link_netnsid(struct sk_buff *skb,
				  const struct net_device *dev,
				  struct net *src_net, gfp_t gfp)
{
	bool put_iflink = false;

	if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(src_net, link_net, gfp);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				return -EMSGSIZE;

			put_iflink = true;
		}
	}

	return nla_put_iflink(skb, dev, put_iflink);
}

static int rtnl_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	const struct rtnl_af_ops *af_ops;
	struct nlattr *af_spec;

	af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
	if (!af_spec)
		return -EMSGSIZE;

	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		struct nlattr *af;
		int err;

		if (!af_ops->fill_link_af)
			continue;

		af = nla_nest_start_noflag(skb, af_ops->family);
		if (!af)
			return -EMSGSIZE;

		err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
		/*
		 * Caller may return ENODATA to indicate that there
		 * was no data to be dumped. This is not an error, it
		 * means we should trim the attribute header and
		 * continue.
		 */
		if (err == -ENODATA)
			nla_nest_cancel(skb, af);
		else if (err < 0)
			return -EMSGSIZE;

		nla_nest_end(skb, af);
	}

	nla_nest_end(skb, af_spec);
	return 0;
}

static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
				 const struct net_device *dev)
{
	struct netdev_name_node *name_node;
	int count = 0;

	list_for_each_entry(name_node, &dev->name_node->list, list) {
		if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
			return -EMSGSIZE;
		count++;
	}
	return count;
}

static int rtnl_fill_prop_list(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct nlattr *prop_list;
	int ret;

	prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
	if (!prop_list)
		return -EMSGSIZE;

	ret = rtnl_fill_alt_ifnames(skb, dev);
	if (ret <= 0)
		goto nest_cancel;

	nla_nest_end(skb, prop_list);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, prop_list);
	return ret;
}

static int rtnl_fill_proto_down(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct nlattr *pr;
	u32 preason;

	if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	preason = dev->proto_down_reason;
	if (!preason)
		return 0;

	pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
	if (!pr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
		nla_nest_cancel(skb, pr);
		goto nla_put_failure;
	}

	nla_nest_end(skb, pr);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb,
			    struct net_device *dev, struct net *src_net,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event, int *new_nsid, int new_ifindex,
			    int tgt_netnsid, gfp_t gfp)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct Qdisc *qdisc;

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
		goto nla_put_failure;

	qdisc = rtnl_dereference(dev->qdisc);
	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_MIN_MTU, dev->min_mtu) ||
	    nla_put_u32(skb, IFLA_MAX_MTU, dev->max_mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SIZE, dev->tso_max_size) ||
	    nla_put_u32(skb, IFLA_TSO_MAX_SEGS, dev->tso_max_segs) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    put_master_ifindex(skb, dev) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (qdisc &&
	     nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
	    nla_put_ifalias(skb, dev) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_up_count) +
			atomic_read(&dev->carrier_down_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
			atomic_read(&dev->carrier_up_count)) ||
	    nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
			atomic_read(&dev->carrier_down_count)))
		goto nla_put_failure;

	if (rtnl_fill_proto_down(skb, dev))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_vf(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
		goto nla_put_failure;

	if (new_nsid &&
	    nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
		goto nla_put_failure;
	if (new_ifindex &&
	    nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
		goto nla_put_failure;

	if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
	    nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
		goto nla_put_failure;

	rcu_read_lock();
	if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
		goto nla_put_failure_rcu;
	rcu_read_unlock();

	if (rtnl_fill_prop_list(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent &&
	    nla_put_string(skb, IFLA_PARENT_DEV_NAME,
			   dev_name(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->dev.parent && dev->dev.parent->bus &&
	    nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
			   dev->dev.parent->bus->name))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_rcu:
	rcu_read_unlock();
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
	 * allow 0-length string (needed to remove an alias).
	 */
	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
	[IFLA_TARGET_NETNSID]	= { .type = NLA_S32 },
	[IFLA_CARRIER_UP_COUNT]	= { .type = NLA_U32 },
	[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
	[IFLA_MIN_MTU]		= { .type = NLA_U32 },
	[IFLA_MAX_MTU]		= { .type = NLA_U32 },
	[IFLA_PROP_LIST]	= { .type = NLA_NESTED },
	[IFLA_ALT_IFNAME]	= { .type = NLA_STRING,
				    .len = ALTIFNAMSIZ - 1 },
	[IFLA_PERM_ADDRESS]	= { .type = NLA_REJECT },
	[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
	[IFLA_NEW_IFINDEX]	= NLA_POLICY_MIN(NLA_S32, 1),
	[IFLA_PARENT_DEV_NAME]	= { .type = NLA_NUL_STRING },
	[IFLA_GRO_MAX_SIZE]	= { .type = NLA_U32 },
	[IFLA_TSO_MAX_SIZE]	= { .type = NLA_REJECT },
	[IFLA_TSO_MAX_SEGS]	= { .type = NLA_REJECT },
};

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_BROADCAST]	= { .type = NLA_REJECT },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* IFLA_PORT_VSI_TYPE carries an opaque struct ifla_port_vsi that is
	 * interpreted by the driver; only its overall length is validated
	 * here.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_UNSPEC]	= { .strict_start_type = IFLA_XDP_EXPECTED_FD },
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_EXPECTED_FD]	= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};

static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}

static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);

	/* 0 is already used to denote IFLA_MASTER wasn't passed, therefore use
	 * another invalid value for ifindex to denote "no master".
	 */
	if (master_idx == -1)
		return !!master;

	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}

static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}

/**
 * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
 * @sk: netlink socket
 * @netnsid: network namespace identifier
 *
 * Returns the network namespace identified by netnsid on success or an error
 * pointer on failure.
 */
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
	struct net *net;

	net = get_net_ns_by_id(sock_net(sk), netnsid);
	if (!net)
		return ERR_PTR(-EINVAL);

	/* For now, the caller is required to have CAP_NET_ADMIN in
	 * the user namespace owning the target net ns.
	 */
	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
		put_net(net);
		return ERR_PTR(-EACCES);
	}
	return net;
}
EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);

static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
				      bool strict_check, struct nlattr **tb,
				      struct netlink_ext_ack *extack)
{
	int hdrlen;

	if (strict_check) {
		struct ifinfomsg *ifm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for link dump");
			return -EINVAL;
		}

		ifm = nlmsg_data(nlh);
		if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
		    ifm->ifi_change) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
			return -EINVAL;
		}
		if (ifm->ifi_index) {
			NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
			return -EINVAL;
		}

		return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
						     IFLA_MAX, ifla_policy,
						     extack);
	}

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
				      extack);
}

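/* Dump all links, walking the per-namespace ifindex hash table.
 * cb->args[0]/[1] record the bucket and offset reached so the walk
 * can resume across successive dump callbacks.
 */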
2114 static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
2115 {
2116 struct netlink_ext_ack *extack = cb->extack;
2117 const struct nlmsghdr *nlh = cb->nlh;
2118 struct net *net = sock_net(skb->sk);
2119 struct net *tgt_net = net;
2120 int h, s_h;
2121 int idx = 0, s_idx;
2122 struct net_device *dev;
2123 struct hlist_head *head;
2124 struct nlattr *tb[IFLA_MAX+1];
2125 u32 ext_filter_mask = 0;
2126 const struct rtnl_link_ops *kind_ops = NULL;
2127 unsigned int flags = NLM_F_MULTI;
2128 int master_idx = 0;
2129 int netnsid = -1;
2130 int err, i;
2131
2132 s_h = cb->args[0];
2133 s_idx = cb->args[1];
2134
2135 err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
2136 if (err < 0) {
2137 if (cb->strict_check)
2138 return err;
2139
2140 goto walk_entries;
2141 }
2142
2143 for (i = 0; i <= IFLA_MAX; ++i) {
2144 if (!tb[i])
2145 continue;
2146
2147
2148 switch (i) {
2149 case IFLA_TARGET_NETNSID:
2150 netnsid = nla_get_s32(tb[i]);
2151 tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
2152 if (IS_ERR(tgt_net)) {
2153 NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
2154 return PTR_ERR(tgt_net);
2155 }
2156 break;
2157 case IFLA_EXT_MASK:
2158 ext_filter_mask = nla_get_u32(tb[i]);
2159 break;
2160 case IFLA_MASTER:
2161 master_idx = nla_get_u32(tb[i]);
2162 break;
2163 case IFLA_LINKINFO:
2164 kind_ops = linkinfo_to_kind_ops(tb[i]);
2165 break;
2166 default:
2167 if (cb->strict_check) {
2168 NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
2169 return -EINVAL;
2170 }
2171 }
2172 }
2173
2174 if (master_idx || kind_ops)
2175 flags |= NLM_F_DUMP_FILTERED;
2176
2177 walk_entries:
2178 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
2179 idx = 0;
2180 head = &tgt_net->dev_index_head[h];
2181 hlist_for_each_entry(dev, head, index_hlist) {
2182 if (link_dump_filtered(dev, master_idx, kind_ops))
2183 goto cont;
2184 if (idx < s_idx)
2185 goto cont;
2186 err = rtnl_fill_ifinfo(skb, dev, net,
2187 RTM_NEWLINK,
2188 NETLINK_CB(cb->skb).portid,
2189 nlh->nlmsg_seq, 0, flags,
2190 ext_filter_mask, 0, NULL, 0,
2191 netnsid, GFP_KERNEL);
2192
2193 if (err < 0) {
2194 if (likely(skb->len))
2195 goto out;
2196
2197 goto out_err;
2198 }
2199 cont:
2200 idx++;
2201 }
2202 }
2203 out:
2204 err = skb->len;
2205 out_err:
2206 cb->args[1] = idx;
2207 cb->args[0] = h;
2208 cb->seq = tgt_net->dev_base_seq;
2209 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2210 if (netnsid >= 0)
2211 put_net(tgt_net);
2212
2213 return err;
2214 }
2215
2216 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
2217 struct netlink_ext_ack *exterr)
2218 {
2219 return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
2220 exterr);
2221 }
2222 EXPORT_SYMBOL(rtnl_nla_parse_ifla);
2223
2224 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
2225 {
2226 struct net *net;
2227 /* Examine the link attributes and figure out which
2228  * network namespace we are talking about.
2229  */
2230 if (tb[IFLA_NET_NS_PID])
2231 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
2232 else if (tb[IFLA_NET_NS_FD])
2233 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
2234 else
2235 net = get_net(src_net);
2236 return net;
2237 }
2238 EXPORT_SYMBOL(rtnl_link_get_net);
2239
2240 /* Figure out which network namespace we are talking about by
2241  * examining the link attributes in the following order:
2242  *
2243  * 1. IFLA_NET_NS_PID
2244  * 2. IFLA_NET_NS_FD
2245  * 3. IFLA_TARGET_NETNSID
2246  */
2247 static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
2248 struct nlattr *tb[])
2249 {
2250 struct net *net;
2251
2252 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
2253 return rtnl_link_get_net(src_net, tb);
2254
2255 if (!tb[IFLA_TARGET_NETNSID])
2256 return get_net(src_net);
2257
2258 net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
2259 if (!net)
2260 return ERR_PTR(-EINVAL);
2261
2262 return net;
2263 }
2264
2265 static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
2266 struct net *src_net,
2267 struct nlattr *tb[], int cap)
2268 {
2269 struct net *net;
2270
2271 net = rtnl_link_get_net_by_nlattr(src_net, tb);
2272 if (IS_ERR(net))
2273 return net;
2274
2275 if (!netlink_ns_capable(skb, net->user_ns, cap)) {
2276 put_net(net);
2277 return ERR_PTR(-EPERM);
2278 }
2279
2280 return net;
2281 }
2282
2283 /* Verify that rtnetlink requests do not pass additional properties
2284  * potentially referring to different network namespaces.
2285  */
2286 static int rtnl_ensure_unique_netns(struct nlattr *tb[],
2287 struct netlink_ext_ack *extack,
2288 bool netns_id_only)
2289 {
2290
2291 if (netns_id_only) {
2292 if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
2293 return 0;
2294
2295 NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
2296 return -EOPNOTSUPP;
2297 }
2298
2299 if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
2300 goto invalid_attr;
2301
2302 if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
2303 goto invalid_attr;
2304
2305 if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
2306 goto invalid_attr;
2307
2308 return 0;
2309
2310 invalid_attr:
2311 NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
2312 return -EINVAL;
2313 }
2314
2315 static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2316 int max_tx_rate)
2317 {
2318 const struct net_device_ops *ops = dev->netdev_ops;
2319
2320 if (!ops->ndo_set_vf_rate)
2321 return -EOPNOTSUPP;
2322 if (max_tx_rate && max_tx_rate < min_tx_rate)
2323 return -EINVAL;
2324
2325 return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
2326 }
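
The ndo_set_vf_rate() callback wrapped above is supplied by the driver. A
hypothetical driver-side sketch (foo_priv, num_vfs and foo_hw_set_vf_rate are
invented names; SR-IOV drivers such as ixgbe follow this general shape):

static int foo_set_vf_rate(struct net_device *dev, int vf,
			   int min_tx_rate, int max_tx_rate)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical */

	if (vf < 0 || vf >= priv->num_vfs)
		return -EINVAL;

	/* max_tx_rate == 0 means "unlimited"; the core has already rejected
	 * a non-zero max below min (see rtnl_set_vf_rate() above).
	 */
	return foo_hw_set_vf_rate(priv, vf, min_tx_rate, max_tx_rate);
}
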
2327
2328 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
2329 struct netlink_ext_ack *extack)
2330 {
2331 if (dev) {
2332 if (tb[IFLA_ADDRESS] &&
2333 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
2334 return -EINVAL;
2335
2336 if (tb[IFLA_BROADCAST] &&
2337 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
2338 return -EINVAL;
2339 }
2340
2341 if (tb[IFLA_AF_SPEC]) {
2342 struct nlattr *af;
2343 int rem, err;
2344
2345 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2346 const struct rtnl_af_ops *af_ops;
2347
2348 af_ops = rtnl_af_lookup(nla_type(af));
2349 if (!af_ops)
2350 return -EAFNOSUPPORT;
2351
2352 if (!af_ops->set_link_af)
2353 return -EOPNOTSUPP;
2354
2355 if (af_ops->validate_link_af) {
2356 err = af_ops->validate_link_af(dev, af, extack);
2357 if (err < 0)
2358 return err;
2359 }
2360 }
2361 }
2362
2363 return 0;
2364 }
2365
2366 static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
2367 int guid_type)
2368 {
2369 const struct net_device_ops *ops = dev->netdev_ops;
2370
2371 return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
2372 }
2373
2374 static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
2375 {
2376 if (dev->type != ARPHRD_INFINIBAND)
2377 return -EOPNOTSUPP;
2378
2379 return handle_infiniband_guid(dev, ivt, guid_type);
2380 }
2381
2382 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
2383 {
2384 const struct net_device_ops *ops = dev->netdev_ops;
2385 int err = -EINVAL;
2386
2387 if (tb[IFLA_VF_MAC]) {
2388 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
2389
2390 if (ivm->vf >= INT_MAX)
2391 return -EINVAL;
2392 err = -EOPNOTSUPP;
2393 if (ops->ndo_set_vf_mac)
2394 err = ops->ndo_set_vf_mac(dev, ivm->vf,
2395 ivm->mac);
2396 if (err < 0)
2397 return err;
2398 }
2399
2400 if (tb[IFLA_VF_VLAN]) {
2401 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
2402
2403 if (ivv->vf >= INT_MAX)
2404 return -EINVAL;
2405 err = -EOPNOTSUPP;
2406 if (ops->ndo_set_vf_vlan)
2407 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
2408 ivv->qos,
2409 htons(ETH_P_8021Q));
2410 if (err < 0)
2411 return err;
2412 }
2413
2414 if (tb[IFLA_VF_VLAN_LIST]) {
2415 struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
2416 struct nlattr *attr;
2417 int rem, len = 0;
2418
2419 err = -EOPNOTSUPP;
2420 if (!ops->ndo_set_vf_vlan)
2421 return err;
2422
2423 nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
2424 if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
2425 nla_len(attr) < NLA_HDRLEN) {
2426 return -EINVAL;
2427 }
2428 if (len >= MAX_VLAN_LIST_LEN)
2429 return -EOPNOTSUPP;
2430 ivvl[len] = nla_data(attr);
2431
2432 len++;
2433 }
2434 if (len == 0)
2435 return -EINVAL;
2436
2437 if (ivvl[0]->vf >= INT_MAX)
2438 return -EINVAL;
2439 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
2440 ivvl[0]->qos, ivvl[0]->vlan_proto);
2441 if (err < 0)
2442 return err;
2443 }
2444
2445 if (tb[IFLA_VF_TX_RATE]) {
2446 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
2447 struct ifla_vf_info ivf;
2448
2449 if (ivt->vf >= INT_MAX)
2450 return -EINVAL;
2451 err = -EOPNOTSUPP;
2452 if (ops->ndo_get_vf_config)
2453 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
2454 if (err < 0)
2455 return err;
2456
2457 err = rtnl_set_vf_rate(dev, ivt->vf,
2458 ivf.min_tx_rate, ivt->rate);
2459 if (err < 0)
2460 return err;
2461 }
2462
2463 if (tb[IFLA_VF_RATE]) {
2464 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
2465
2466 if (ivt->vf >= INT_MAX)
2467 return -EINVAL;
2468
2469 err = rtnl_set_vf_rate(dev, ivt->vf,
2470 ivt->min_tx_rate, ivt->max_tx_rate);
2471 if (err < 0)
2472 return err;
2473 }
2474
2475 if (tb[IFLA_VF_SPOOFCHK]) {
2476 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
2477
2478 if (ivs->vf >= INT_MAX)
2479 return -EINVAL;
2480 err = -EOPNOTSUPP;
2481 if (ops->ndo_set_vf_spoofchk)
2482 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
2483 ivs->setting);
2484 if (err < 0)
2485 return err;
2486 }
2487
2488 if (tb[IFLA_VF_LINK_STATE]) {
2489 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
2490
2491 if (ivl->vf >= INT_MAX)
2492 return -EINVAL;
2493 err = -EOPNOTSUPP;
2494 if (ops->ndo_set_vf_link_state)
2495 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
2496 ivl->link_state);
2497 if (err < 0)
2498 return err;
2499 }
2500
2501 if (tb[IFLA_VF_RSS_QUERY_EN]) {
2502 struct ifla_vf_rss_query_en *ivrssq_en;
2503
2504 err = -EOPNOTSUPP;
2505 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
2506 if (ivrssq_en->vf >= INT_MAX)
2507 return -EINVAL;
2508 if (ops->ndo_set_vf_rss_query_en)
2509 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
2510 ivrssq_en->setting);
2511 if (err < 0)
2512 return err;
2513 }
2514
2515 if (tb[IFLA_VF_TRUST]) {
2516 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
2517
2518 if (ivt->vf >= INT_MAX)
2519 return -EINVAL;
2520 err = -EOPNOTSUPP;
2521 if (ops->ndo_set_vf_trust)
2522 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
2523 if (err < 0)
2524 return err;
2525 }
2526
2527 if (tb[IFLA_VF_IB_NODE_GUID]) {
2528 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
2529
2530 if (ivt->vf >= INT_MAX)
2531 return -EINVAL;
2532 if (!ops->ndo_set_vf_guid)
2533 return -EOPNOTSUPP;
2534 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
2535 }
2536
2537 if (tb[IFLA_VF_IB_PORT_GUID]) {
2538 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
2539
2540 if (ivt->vf >= INT_MAX)
2541 return -EINVAL;
2542 if (!ops->ndo_set_vf_guid)
2543 return -EOPNOTSUPP;
2544
2545 return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
2546 }
2547
2548 return err;
2549 }
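
do_setvfinfo() consumes one parsed IFLA_VF_INFO nest. On the wire the
attributes arrive inside an RTM_SETLINK message as
IFLA_VFINFO_LIST { IFLA_VF_INFO { IFLA_VF_MAC, ... } }. A userspace sketch of
building that nesting for VF 0, reusing add_rta() from the earlier dump
example plus two hypothetical nest helpers (req is a request struct shaped
like the one in that example, with ifi_index set to the PF):

/* open a nest; its length is patched in nest_end() */
static struct rtattr *nest_start(struct nlmsghdr *nlh, unsigned short type)
{
	struct rtattr *nest = (struct rtattr *)
		((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	nest->rta_type = type;
	nest->rta_len = RTA_LENGTH(0);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(nest->rta_len);
	return nest;
}

static void nest_end(struct nlmsghdr *nlh, struct rtattr *nest)
{
	nest->rta_len = (char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len) -
			(char *)nest;
}

	/* ... inside the RTM_SETLINK request: */
	struct ifla_vf_mac ivm = { .vf = 0 };
	struct rtattr *list, *info;

	memcpy(ivm.mac, new_mac, 6);	/* new_mac: caller-provided address */
	list = nest_start(&req.nlh, IFLA_VFINFO_LIST);
	info = nest_start(&req.nlh, IFLA_VF_INFO);
	add_rta(&req.nlh, IFLA_VF_MAC, &ivm, sizeof(ivm));
	nest_end(&req.nlh, info);
	nest_end(&req.nlh, list);
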
2550
2551 static int do_set_master(struct net_device *dev, int ifindex,
2552 struct netlink_ext_ack *extack)
2553 {
2554 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
2555 const struct net_device_ops *ops;
2556 int err;
2557
2558 if (upper_dev) {
2559 if (upper_dev->ifindex == ifindex)
2560 return 0;
2561 ops = upper_dev->netdev_ops;
2562 if (ops->ndo_del_slave) {
2563 err = ops->ndo_del_slave(upper_dev, dev);
2564 if (err)
2565 return err;
2566 } else {
2567 return -EOPNOTSUPP;
2568 }
2569 }
2570
2571 if (ifindex) {
2572 upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
2573 if (!upper_dev)
2574 return -EINVAL;
2575 ops = upper_dev->netdev_ops;
2576 if (ops->ndo_add_slave) {
2577 err = ops->ndo_add_slave(upper_dev, dev, extack);
2578 if (err)
2579 return err;
2580 } else {
2581 return -EOPNOTSUPP;
2582 }
2583 }
2584 return 0;
2585 }
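
Userspace requests enslavement simply by attaching IFLA_MASTER to an
RTM_SETLINK (or RTM_NEWLINK) message; a value of 0 releases the port, matching
the two halves of do_set_master() above. A sketch reusing add_rta():

static int set_master(int fd, int port_ifindex, __u32 master_ifindex)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char buf[32];
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = RTM_SETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = port_ifindex;

	/* master_ifindex == 0 detaches the port from its current master */
	add_rta(&req.nlh, IFLA_MASTER, &master_ifindex,
		sizeof(master_ifindex));
	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
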
2586
2587 static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
2588 [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
2589 [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
2590 };
2591
2592 static int do_set_proto_down(struct net_device *dev,
2593 struct nlattr *nl_proto_down,
2594 struct nlattr *nl_proto_down_reason,
2595 struct netlink_ext_ack *extack)
2596 {
2597 struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
2598 unsigned long mask = 0;
2599 u32 value;
2600 bool proto_down;
2601 int err;
2602
2603 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
2604 NL_SET_ERR_MSG(extack, "Protodown not supported by device");
2605 return -EOPNOTSUPP;
2606 }
2607
2608 if (nl_proto_down_reason) {
2609 err = nla_parse_nested_deprecated(pdreason,
2610 IFLA_PROTO_DOWN_REASON_MAX,
2611 nl_proto_down_reason,
2612 ifla_proto_down_reason_policy,
2613 NULL);
2614 if (err < 0)
2615 return err;
2616
2617 if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
2618 NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
2619 return -EINVAL;
2620 }
2621
2622 value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
2623
2624 if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
2625 mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
2626
2627 dev_change_proto_down_reason(dev, mask, value);
2628 }
2629
2630 if (nl_proto_down) {
2631 proto_down = nla_get_u8(nl_proto_down);
2632
2633 /* Don't turn off protodown if there are active reasons */
2634 if (!proto_down && dev->proto_down_reason) {
2635 NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
2636 return -EBUSY;
2637 }
2638 err = dev_change_proto_down(dev,
2639 proto_down);
2640 if (err)
2641 return err;
2642 }
2643
2644 return 0;
2645 }
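
Userspace can set protodown and its reason bits in one message; clearing
IFLA_PROTO_DOWN while reason bits remain set is the -EBUSY case above. A
sketch of the attribute layout, assuming the add_rta()/nest_start()/nest_end()
helpers from the earlier sketches and a req struct as before:

	__u8  down  = 1;
	__u32 mask  = 1u << 0;	/* which reason bits this request writes */
	__u32 value = 1u << 0;	/* set bit 0 */
	struct rtattr *nest;

	add_rta(&req.nlh, IFLA_PROTO_DOWN, &down, sizeof(down));
	nest = nest_start(&req.nlh, IFLA_PROTO_DOWN_REASON);
	add_rta(&req.nlh, IFLA_PROTO_DOWN_REASON_MASK, &mask, sizeof(mask));
	add_rta(&req.nlh, IFLA_PROTO_DOWN_REASON_VALUE, &value, sizeof(value));
	nest_end(&req.nlh, nest);
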
2646
2647 #define DO_SETLINK_MODIFIED 0x01
2648 /* notify flag means notify + modified. */
2649 #define DO_SETLINK_NOTIFY 0x03
2650 static int do_setlink(const struct sk_buff *skb,
2651 struct net_device *dev, struct ifinfomsg *ifm,
2652 struct netlink_ext_ack *extack,
2653 struct nlattr **tb, int status)
2654 {
2655 const struct net_device_ops *ops = dev->netdev_ops;
2656 char ifname[IFNAMSIZ];
2657 int err;
2658
2659 err = validate_linkmsg(dev, tb, extack);
2660 if (err < 0)
2661 return err;
2662
2663 if (tb[IFLA_IFNAME])
2664 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2665 else
2666 ifname[0] = '\0';
2667
2668 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
2669 const char *pat = ifname[0] ? ifname : NULL;
2670 struct net *net;
2671 int new_ifindex;
2672
2673 net = rtnl_link_get_net_capable(skb, dev_net(dev),
2674 tb, CAP_NET_ADMIN);
2675 if (IS_ERR(net)) {
2676 err = PTR_ERR(net);
2677 goto errout;
2678 }
2679
2680 if (tb[IFLA_NEW_IFINDEX])
2681 new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
2682 else
2683 new_ifindex = 0;
2684
2685 err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
2686 put_net(net);
2687 if (err)
2688 goto errout;
2689 status |= DO_SETLINK_MODIFIED;
2690 }
2691
2692 if (tb[IFLA_MAP]) {
2693 struct rtnl_link_ifmap *u_map;
2694 struct ifmap k_map;
2695
2696 if (!ops->ndo_set_config) {
2697 err = -EOPNOTSUPP;
2698 goto errout;
2699 }
2700
2701 if (!netif_device_present(dev)) {
2702 err = -ENODEV;
2703 goto errout;
2704 }
2705
2706 u_map = nla_data(tb[IFLA_MAP]);
2707 k_map.mem_start = (unsigned long) u_map->mem_start;
2708 k_map.mem_end = (unsigned long) u_map->mem_end;
2709 k_map.base_addr = (unsigned short) u_map->base_addr;
2710 k_map.irq = (unsigned char) u_map->irq;
2711 k_map.dma = (unsigned char) u_map->dma;
2712 k_map.port = (unsigned char) u_map->port;
2713
2714 err = ops->ndo_set_config(dev, &k_map);
2715 if (err < 0)
2716 goto errout;
2717
2718 status |= DO_SETLINK_NOTIFY;
2719 }
2720
2721 if (tb[IFLA_ADDRESS]) {
2722 struct sockaddr *sa;
2723 int len;
2724
2725 len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
2726 sizeof(*sa));
2727 sa = kmalloc(len, GFP_KERNEL);
2728 if (!sa) {
2729 err = -ENOMEM;
2730 goto errout;
2731 }
2732 sa->sa_family = dev->type;
2733 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
2734 dev->addr_len);
2735 err = dev_set_mac_address_user(dev, sa, extack);
2736 kfree(sa);
2737 if (err)
2738 goto errout;
2739 status |= DO_SETLINK_MODIFIED;
2740 }
2741
2742 if (tb[IFLA_MTU]) {
2743 err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
2744 if (err < 0)
2745 goto errout;
2746 status |= DO_SETLINK_MODIFIED;
2747 }
2748
2749 if (tb[IFLA_GROUP]) {
2750 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
2751 status |= DO_SETLINK_NOTIFY;
2752 }
2753
2754 /*
2755  * Interface selected by interface index but interface
2756  * name provided implies that a name change has been
2757  * requested.
2758  */
2759 if (ifm->ifi_index > 0 && ifname[0]) {
2760 err = dev_change_name(dev, ifname);
2761 if (err < 0)
2762 goto errout;
2763 status |= DO_SETLINK_MODIFIED;
2764 }
2765
2766 if (tb[IFLA_IFALIAS]) {
2767 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
2768 nla_len(tb[IFLA_IFALIAS]));
2769 if (err < 0)
2770 goto errout;
2771 status |= DO_SETLINK_NOTIFY;
2772 }
2773
2774 if (tb[IFLA_BROADCAST]) {
2775 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
2776 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2777 }
2778
2779 if (ifm->ifi_flags || ifm->ifi_change) {
2780 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
2781 extack);
2782 if (err < 0)
2783 goto errout;
2784 }
2785
2786 if (tb[IFLA_MASTER]) {
2787 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
2788 if (err)
2789 goto errout;
2790 status |= DO_SETLINK_MODIFIED;
2791 }
2792
2793 if (tb[IFLA_CARRIER]) {
2794 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
2795 if (err)
2796 goto errout;
2797 status |= DO_SETLINK_MODIFIED;
2798 }
2799
2800 if (tb[IFLA_TXQLEN]) {
2801 unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
2802
2803 err = dev_change_tx_queue_len(dev, value);
2804 if (err)
2805 goto errout;
2806 status |= DO_SETLINK_MODIFIED;
2807 }
2808
2809 if (tb[IFLA_GSO_MAX_SIZE]) {
2810 u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
2811
2812 if (max_size > dev->tso_max_size) {
2813 err = -EINVAL;
2814 goto errout;
2815 }
2816
2817 if (dev->gso_max_size ^ max_size) {
2818 netif_set_gso_max_size(dev, max_size);
2819 status |= DO_SETLINK_MODIFIED;
2820 }
2821 }
2822
2823 if (tb[IFLA_GSO_MAX_SEGS]) {
2824 u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
2825
2826 if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
2827 err = -EINVAL;
2828 goto errout;
2829 }
2830
2831 if (dev->gso_max_segs ^ max_segs) {
2832 netif_set_gso_max_segs(dev, max_segs);
2833 status |= DO_SETLINK_MODIFIED;
2834 }
2835 }
2836
2837 if (tb[IFLA_GRO_MAX_SIZE]) {
2838 u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
2839
2840 if (dev->gro_max_size ^ gro_max_size) {
2841 netif_set_gro_max_size(dev, gro_max_size);
2842 status |= DO_SETLINK_MODIFIED;
2843 }
2844 }
2845
2846 if (tb[IFLA_OPERSTATE])
2847 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
2848
2849 if (tb[IFLA_LINKMODE]) {
2850 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
2851
2852 write_lock(&dev_base_lock);
2853 if (dev->link_mode ^ value)
2854 status |= DO_SETLINK_NOTIFY;
2855 dev->link_mode = value;
2856 write_unlock(&dev_base_lock);
2857 }
2858
2859 if (tb[IFLA_VFINFO_LIST]) {
2860 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
2861 struct nlattr *attr;
2862 int rem;
2863
2864 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
2865 if (nla_type(attr) != IFLA_VF_INFO ||
2866 nla_len(attr) < NLA_HDRLEN) {
2867 err = -EINVAL;
2868 goto errout;
2869 }
2870 err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
2871 attr,
2872 ifla_vf_policy,
2873 NULL);
2874 if (err < 0)
2875 goto errout;
2876 err = do_setvfinfo(dev, vfinfo);
2877 if (err < 0)
2878 goto errout;
2879 status |= DO_SETLINK_NOTIFY;
2880 }
2881 }
2882 err = 0;
2883
2884 if (tb[IFLA_VF_PORTS]) {
2885 struct nlattr *port[IFLA_PORT_MAX+1];
2886 struct nlattr *attr;
2887 int vf;
2888 int rem;
2889
2890 err = -EOPNOTSUPP;
2891 if (!ops->ndo_set_vf_port)
2892 goto errout;
2893
2894 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
2895 if (nla_type(attr) != IFLA_VF_PORT ||
2896 nla_len(attr) < NLA_HDRLEN) {
2897 err = -EINVAL;
2898 goto errout;
2899 }
2900 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2901 attr,
2902 ifla_port_policy,
2903 NULL);
2904 if (err < 0)
2905 goto errout;
2906 if (!port[IFLA_PORT_VF]) {
2907 err = -EOPNOTSUPP;
2908 goto errout;
2909 }
2910 vf = nla_get_u32(port[IFLA_PORT_VF]);
2911 err = ops->ndo_set_vf_port(dev, vf, port);
2912 if (err < 0)
2913 goto errout;
2914 status |= DO_SETLINK_NOTIFY;
2915 }
2916 }
2917 err = 0;
2918
2919 if (tb[IFLA_PORT_SELF]) {
2920 struct nlattr *port[IFLA_PORT_MAX+1];
2921
2922 err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
2923 tb[IFLA_PORT_SELF],
2924 ifla_port_policy, NULL);
2925 if (err < 0)
2926 goto errout;
2927
2928 err = -EOPNOTSUPP;
2929 if (ops->ndo_set_vf_port)
2930 err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
2931 if (err < 0)
2932 goto errout;
2933 status |= DO_SETLINK_NOTIFY;
2934 }
2935
2936 if (tb[IFLA_AF_SPEC]) {
2937 struct nlattr *af;
2938 int rem;
2939
2940 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
2941 const struct rtnl_af_ops *af_ops;
2942
2943 BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
2944
2945 err = af_ops->set_link_af(dev, af, extack);
2946 if (err < 0)
2947 goto errout;
2948
2949 status |= DO_SETLINK_NOTIFY;
2950 }
2951 }
2952 err = 0;
2953
2954 if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
2955 err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
2956 tb[IFLA_PROTO_DOWN_REASON], extack);
2957 if (err)
2958 goto errout;
2959 status |= DO_SETLINK_NOTIFY;
2960 }
2961
2962 if (tb[IFLA_XDP]) {
2963 struct nlattr *xdp[IFLA_XDP_MAX + 1];
2964 u32 xdp_flags = 0;
2965
2966 err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
2967 tb[IFLA_XDP],
2968 ifla_xdp_policy, NULL);
2969 if (err < 0)
2970 goto errout;
2971
2972 if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
2973 err = -EINVAL;
2974 goto errout;
2975 }
2976
2977 if (xdp[IFLA_XDP_FLAGS]) {
2978 xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
2979 if (xdp_flags & ~XDP_FLAGS_MASK) {
2980 err = -EINVAL;
2981 goto errout;
2982 }
2983 if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
2984 err = -EINVAL;
2985 goto errout;
2986 }
2987 }
2988
2989 if (xdp[IFLA_XDP_FD]) {
2990 int expected_fd = -1;
2991
2992 if (xdp_flags & XDP_FLAGS_REPLACE) {
2993 if (!xdp[IFLA_XDP_EXPECTED_FD]) {
2994 err = -EINVAL;
2995 goto errout;
2996 }
2997 expected_fd =
2998 nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
2999 }
3000
3001 err = dev_change_xdp_fd(dev, extack,
3002 nla_get_s32(xdp[IFLA_XDP_FD]),
3003 expected_fd,
3004 xdp_flags);
3005 if (err)
3006 goto errout;
3007 status |= DO_SETLINK_NOTIFY;
3008 }
3009 }
3010
3011 errout:
3012 if (status & DO_SETLINK_MODIFIED) {
3013 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
3014 netdev_state_change(dev);
3015
3016 if (err < 0)
3017 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
3018 dev->name);
3019 }
3020
3021 return err;
3022 }
3023
3024 static struct net_device *rtnl_dev_get(struct net *net,
3025 struct nlattr *tb[])
3026 {
3027 char ifname[ALTIFNAMSIZ];
3028
3029 if (tb[IFLA_IFNAME])
3030 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3031 else if (tb[IFLA_ALT_IFNAME])
3032 nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
3033 else
3034 return NULL;
3035
3036 return __dev_get_by_name(net, ifname);
3037 }
3038
3039 static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3040 struct netlink_ext_ack *extack)
3041 {
3042 struct net *net = sock_net(skb->sk);
3043 struct ifinfomsg *ifm;
3044 struct net_device *dev;
3045 int err;
3046 struct nlattr *tb[IFLA_MAX+1];
3047
3048 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3049 ifla_policy, extack);
3050 if (err < 0)
3051 goto errout;
3052
3053 err = rtnl_ensure_unique_netns(tb, extack, false);
3054 if (err < 0)
3055 goto errout;
3056
3057 err = -EINVAL;
3058 ifm = nlmsg_data(nlh);
3059 if (ifm->ifi_index > 0)
3060 dev = __dev_get_by_index(net, ifm->ifi_index);
3061 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3062 dev = rtnl_dev_get(net, tb);
3063 else
3064 goto errout;
3065
3066 if (dev == NULL) {
3067 err = -ENODEV;
3068 goto errout;
3069 }
3070
3071 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3072 errout:
3073 return err;
3074 }
3075
3076 static int rtnl_group_dellink(const struct net *net, int group)
3077 {
3078 struct net_device *dev, *aux;
3079 LIST_HEAD(list_kill);
3080 bool found = false;
3081
3082 if (!group)
3083 return -EPERM;
3084
3085 for_each_netdev(net, dev) {
3086 if (dev->group == group) {
3087 const struct rtnl_link_ops *ops;
3088
3089 found = true;
3090 ops = dev->rtnl_link_ops;
3091 if (!ops || !ops->dellink)
3092 return -EOPNOTSUPP;
3093 }
3094 }
3095
3096 if (!found)
3097 return -ENODEV;
3098
3099 for_each_netdev_safe(net, dev, aux) {
3100 if (dev->group == group) {
3101 const struct rtnl_link_ops *ops;
3102
3103 ops = dev->rtnl_link_ops;
3104 ops->dellink(dev, &list_kill);
3105 }
3106 }
3107 unregister_netdevice_many(&list_kill);
3108
3109 return 0;
3110 }
3111
3112 int rtnl_delete_link(struct net_device *dev)
3113 {
3114 const struct rtnl_link_ops *ops;
3115 LIST_HEAD(list_kill);
3116
3117 ops = dev->rtnl_link_ops;
3118 if (!ops || !ops->dellink)
3119 return -EOPNOTSUPP;
3120
3121 ops->dellink(dev, &list_kill);
3122 unregister_netdevice_many(&list_kill);
3123
3124 return 0;
3125 }
3126 EXPORT_SYMBOL_GPL(rtnl_delete_link);
3127
3128 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3129 struct netlink_ext_ack *extack)
3130 {
3131 struct net *net = sock_net(skb->sk);
3132 struct net *tgt_net = net;
3133 struct net_device *dev = NULL;
3134 struct ifinfomsg *ifm;
3135 struct nlattr *tb[IFLA_MAX+1];
3136 int err;
3137 int netnsid = -1;
3138
3139 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3140 ifla_policy, extack);
3141 if (err < 0)
3142 return err;
3143
3144 err = rtnl_ensure_unique_netns(tb, extack, true);
3145 if (err < 0)
3146 return err;
3147
3148 if (tb[IFLA_TARGET_NETNSID]) {
3149 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3150 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3151 if (IS_ERR(tgt_net))
3152 return PTR_ERR(tgt_net);
3153 }
3154
3155 err = -EINVAL;
3156 ifm = nlmsg_data(nlh);
3157 if (ifm->ifi_index > 0)
3158 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3159 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3160 dev = rtnl_dev_get(net, tb);
3161 else if (tb[IFLA_GROUP])
3162 err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
3163 else
3164 goto out;
3165
3166 if (!dev) {
3167 if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
3168 err = -ENODEV;
3169
3170 goto out;
3171 }
3172
3173 err = rtnl_delete_link(dev);
3174
3175 out:
3176 if (netnsid >= 0)
3177 put_net(tgt_net);
3178
3179 return err;
3180 }
3181
3182 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
3183 {
3184 unsigned int old_flags;
3185 int err;
3186
3187 old_flags = dev->flags;
3188 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
3189 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
3190 NULL);
3191 if (err < 0)
3192 return err;
3193 }
3194
3195 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
3196 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
3197 } else {
3198 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
3199 __dev_notify_flags(dev, old_flags, ~0U);
3200 }
3201 return 0;
3202 }
3203 EXPORT_SYMBOL(rtnl_configure_link);
3204
3205 struct net_device *rtnl_create_link(struct net *net, const char *ifname,
3206 unsigned char name_assign_type,
3207 const struct rtnl_link_ops *ops,
3208 struct nlattr *tb[],
3209 struct netlink_ext_ack *extack)
3210 {
3211 struct net_device *dev;
3212 unsigned int num_tx_queues = 1;
3213 unsigned int num_rx_queues = 1;
3214
3215 if (tb[IFLA_NUM_TX_QUEUES])
3216 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
3217 else if (ops->get_num_tx_queues)
3218 num_tx_queues = ops->get_num_tx_queues();
3219
3220 if (tb[IFLA_NUM_RX_QUEUES])
3221 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
3222 else if (ops->get_num_rx_queues)
3223 num_rx_queues = ops->get_num_rx_queues();
3224
3225 if (num_tx_queues < 1 || num_tx_queues > 4096) {
3226 NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
3227 return ERR_PTR(-EINVAL);
3228 }
3229
3230 if (num_rx_queues < 1 || num_rx_queues > 4096) {
3231 NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
3232 return ERR_PTR(-EINVAL);
3233 }
3234
3235 if (ops->alloc) {
3236 dev = ops->alloc(tb, ifname, name_assign_type,
3237 num_tx_queues, num_rx_queues);
3238 if (IS_ERR(dev))
3239 return dev;
3240 } else {
3241 dev = alloc_netdev_mqs(ops->priv_size, ifname,
3242 name_assign_type, ops->setup,
3243 num_tx_queues, num_rx_queues);
3244 }
3245
3246 if (!dev)
3247 return ERR_PTR(-ENOMEM);
3248
3249 dev_net_set(dev, net);
3250 dev->rtnl_link_ops = ops;
3251 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
3252
3253 if (tb[IFLA_MTU]) {
3254 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
3255 int err;
3256
3257 err = dev_validate_mtu(dev, mtu, extack);
3258 if (err) {
3259 free_netdev(dev);
3260 return ERR_PTR(err);
3261 }
3262 dev->mtu = mtu;
3263 }
3264 if (tb[IFLA_ADDRESS]) {
3265 __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
3266 nla_len(tb[IFLA_ADDRESS]));
3267 dev->addr_assign_type = NET_ADDR_SET;
3268 }
3269 if (tb[IFLA_BROADCAST])
3270 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
3271 nla_len(tb[IFLA_BROADCAST]));
3272 if (tb[IFLA_TXQLEN])
3273 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
3274 if (tb[IFLA_OPERSTATE])
3275 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
3276 if (tb[IFLA_LINKMODE])
3277 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
3278 if (tb[IFLA_GROUP])
3279 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
3280 if (tb[IFLA_GSO_MAX_SIZE])
3281 netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
3282 if (tb[IFLA_GSO_MAX_SEGS])
3283 netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
3284 if (tb[IFLA_GRO_MAX_SIZE])
3285 netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
3286
3287 return dev;
3288 }
3289 EXPORT_SYMBOL(rtnl_create_link);
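
rtnl_create_link() is only reached once a module has registered a struct
rtnl_link_ops for its kind. A minimal registration sketch modeled on
drivers/net/dummy.c ("foo" and the setup body are illustrative, not an
existing link type):

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);	/* handed to alloc_netdev_mqs() above */
	dev->flags |= IFF_NOARP;
}

static struct rtnl_link_ops foo_link_ops __read_mostly = {
	.kind	= "foo",	/* matched against IFLA_INFO_KIND */
	.setup	= foo_setup,
};

static int __init foo_init(void)
{
	return rtnl_link_register(&foo_link_ops);
}

static void __exit foo_exit(void)
{
	rtnl_link_unregister(&foo_link_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_ALIAS_RTNL_LINK("foo");	/* lets request_module("rtnl-link-foo") work */
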
3290
3291 static int rtnl_group_changelink(const struct sk_buff *skb,
3292 struct net *net, int group,
3293 struct ifinfomsg *ifm,
3294 struct netlink_ext_ack *extack,
3295 struct nlattr **tb)
3296 {
3297 struct net_device *dev, *aux;
3298 int err;
3299
3300 for_each_netdev_safe(net, dev, aux) {
3301 if (dev->group == group) {
3302 err = do_setlink(skb, dev, ifm, extack, tb, 0);
3303 if (err < 0)
3304 return err;
3305 }
3306 }
3307
3308 return 0;
3309 }
3310
3311 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
3312 const struct rtnl_link_ops *ops,
3313 struct nlattr **tb, struct nlattr **data,
3314 struct netlink_ext_ack *extack)
3315 {
3316 unsigned char name_assign_type = NET_NAME_USER;
3317 struct net *net = sock_net(skb->sk);
3318 struct net *dest_net, *link_net;
3319 struct net_device *dev;
3320 char ifname[IFNAMSIZ];
3321 int err;
3322
3323 if (!ops->alloc && !ops->setup)
3324 return -EOPNOTSUPP;
3325
3326 if (tb[IFLA_IFNAME]) {
3327 nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
3328 } else {
3329 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
3330 name_assign_type = NET_NAME_ENUM;
3331 }
3332
3333 dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
3334 if (IS_ERR(dest_net))
3335 return PTR_ERR(dest_net);
3336
3337 if (tb[IFLA_LINK_NETNSID]) {
3338 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
3339
3340 link_net = get_net_ns_by_id(dest_net, id);
3341 if (!link_net) {
3342 NL_SET_ERR_MSG(extack, "Unknown network namespace id");
3343 err = -EINVAL;
3344 goto out;
3345 }
3346 err = -EPERM;
3347 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
3348 goto out;
3349 } else {
3350 link_net = NULL;
3351 }
3352
3353 dev = rtnl_create_link(link_net ? : dest_net, ifname,
3354 name_assign_type, ops, tb, extack);
3355 if (IS_ERR(dev)) {
3356 err = PTR_ERR(dev);
3357 goto out;
3358 }
3359
3360 dev->ifindex = ifm->ifi_index;
3361
3362 if (ops->newlink)
3363 err = ops->newlink(link_net ? : net, dev, tb, data, extack);
3364 else
3365 err = register_netdevice(dev);
3366 if (err < 0) {
3367 free_netdev(dev);
3368 goto out;
3369 }
3370
3371 err = rtnl_configure_link(dev, ifm);
3372 if (err < 0)
3373 goto out_unregister;
3374 if (link_net) {
3375 err = dev_change_net_namespace(dev, dest_net, ifname);
3376 if (err < 0)
3377 goto out_unregister;
3378 }
3379 if (tb[IFLA_MASTER]) {
3380 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
3381 if (err)
3382 goto out_unregister;
3383 }
3384 out:
3385 if (link_net)
3386 put_net(link_net);
3387 put_net(dest_net);
3388 return err;
3389 out_unregister:
3390 if (ops->newlink) {
3391 LIST_HEAD(list_kill);
3392
3393 ops->dellink(dev, &list_kill);
3394 unregister_netdevice_many(&list_kill);
3395 } else {
3396 unregister_netdevice(dev);
3397 }
3398 goto out;
3399 }
3400
3401 struct rtnl_newlink_tbs {
3402 struct nlattr *tb[IFLA_MAX + 1];
3403 struct nlattr *attr[RTNL_MAX_TYPE + 1];
3404 struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
3405 };
3406
3407 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3408 struct rtnl_newlink_tbs *tbs,
3409 struct netlink_ext_ack *extack)
3410 {
3411 struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
3412 struct nlattr ** const tb = tbs->tb;
3413 const struct rtnl_link_ops *m_ops;
3414 struct net_device *master_dev;
3415 struct net *net = sock_net(skb->sk);
3416 const struct rtnl_link_ops *ops;
3417 struct nlattr **slave_data;
3418 char kind[MODULE_NAME_LEN];
3419 struct net_device *dev;
3420 struct ifinfomsg *ifm;
3421 struct nlattr **data;
3422 bool link_specified;
3423 int err;
3424
3425 #ifdef CONFIG_MODULES
3426 replay:
3427 #endif
3428 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3429 ifla_policy, extack);
3430 if (err < 0)
3431 return err;
3432
3433 err = rtnl_ensure_unique_netns(tb, extack, false);
3434 if (err < 0)
3435 return err;
3436
3437 ifm = nlmsg_data(nlh);
3438 if (ifm->ifi_index > 0) {
3439 link_specified = true;
3440 dev = __dev_get_by_index(net, ifm->ifi_index);
3441 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
3442 link_specified = true;
3443 dev = rtnl_dev_get(net, tb);
3444 } else {
3445 link_specified = false;
3446 dev = NULL;
3447 }
3448
3449 master_dev = NULL;
3450 m_ops = NULL;
3451 if (dev) {
3452 master_dev = netdev_master_upper_dev_get(dev);
3453 if (master_dev)
3454 m_ops = master_dev->rtnl_link_ops;
3455 }
3456
3457 err = validate_linkmsg(dev, tb, extack);
3458 if (err < 0)
3459 return err;
3460
3461 if (tb[IFLA_LINKINFO]) {
3462 err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
3463 tb[IFLA_LINKINFO],
3464 ifla_info_policy, NULL);
3465 if (err < 0)
3466 return err;
3467 } else
3468 memset(linkinfo, 0, sizeof(linkinfo));
3469
3470 if (linkinfo[IFLA_INFO_KIND]) {
3471 nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
3472 ops = rtnl_link_ops_get(kind);
3473 } else {
3474 kind[0] = '\0';
3475 ops = NULL;
3476 }
3477
3478 data = NULL;
3479 if (ops) {
3480 if (ops->maxtype > RTNL_MAX_TYPE)
3481 return -EINVAL;
3482
3483 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
3484 err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
3485 linkinfo[IFLA_INFO_DATA],
3486 ops->policy, extack);
3487 if (err < 0)
3488 return err;
3489 data = tbs->attr;
3490 }
3491 if (ops->validate) {
3492 err = ops->validate(tb, data, extack);
3493 if (err < 0)
3494 return err;
3495 }
3496 }
3497
3498 slave_data = NULL;
3499 if (m_ops) {
3500 if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
3501 return -EINVAL;
3502
3503 if (m_ops->slave_maxtype &&
3504 linkinfo[IFLA_INFO_SLAVE_DATA]) {
3505 err = nla_parse_nested_deprecated(tbs->slave_attr,
3506 m_ops->slave_maxtype,
3507 linkinfo[IFLA_INFO_SLAVE_DATA],
3508 m_ops->slave_policy,
3509 extack);
3510 if (err < 0)
3511 return err;
3512 slave_data = tbs->slave_attr;
3513 }
3514 }
3515
3516 if (dev) {
3517 int status = 0;
3518
3519 if (nlh->nlmsg_flags & NLM_F_EXCL)
3520 return -EEXIST;
3521 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3522 return -EOPNOTSUPP;
3523
3524 if (linkinfo[IFLA_INFO_DATA]) {
3525 if (!ops || ops != dev->rtnl_link_ops ||
3526 !ops->changelink)
3527 return -EOPNOTSUPP;
3528
3529 err = ops->changelink(dev, tb, data, extack);
3530 if (err < 0)
3531 return err;
3532 status |= DO_SETLINK_NOTIFY;
3533 }
3534
3535 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
3536 if (!m_ops || !m_ops->slave_changelink)
3537 return -EOPNOTSUPP;
3538
3539 err = m_ops->slave_changelink(master_dev, dev, tb,
3540 slave_data, extack);
3541 if (err < 0)
3542 return err;
3543 status |= DO_SETLINK_NOTIFY;
3544 }
3545
3546 return do_setlink(skb, dev, ifm, extack, tb, status);
3547 }
3548
3549 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
3550 /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
3551  * or it's for a group
3552  */
3553 if (link_specified)
3554 return -ENODEV;
3555 if (tb[IFLA_GROUP])
3556 return rtnl_group_changelink(skb, net,
3557 nla_get_u32(tb[IFLA_GROUP]),
3558 ifm, extack, tb);
3559 return -ENODEV;
3560 }
3561
3562 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
3563 return -EOPNOTSUPP;
3564
3565 if (!ops) {
3566 #ifdef CONFIG_MODULES
3567 if (kind[0]) {
3568 __rtnl_unlock();
3569 request_module("rtnl-link-%s", kind);
3570 rtnl_lock();
3571 ops = rtnl_link_ops_get(kind);
3572 if (ops)
3573 goto replay;
3574 }
3575 #endif
3576 NL_SET_ERR_MSG(extack, "Unknown device type");
3577 return -EOPNOTSUPP;
3578 }
3579
3580 return rtnl_newlink_create(skb, ifm, ops, tb, data, extack);
3581 }
3582
3583 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3584 struct netlink_ext_ack *extack)
3585 {
3586 struct rtnl_newlink_tbs *tbs;
3587 int ret;
3588
3589 tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
3590 if (!tbs)
3591 return -ENOMEM;
3592
3593 ret = __rtnl_newlink(skb, nlh, tbs, extack);
3594 kfree(tbs);
3595 return ret;
3596 }
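
The userspace half of this path is an RTM_NEWLINK with NLM_F_CREATE and an
IFLA_LINKINFO nest naming the kind, i.e. what "ip link add dummy0 type dummy"
sends. A sketch with the helpers and headers from the earlier examples:

static int create_dummy(int fd, const char *name)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char buf[128];
	} req;
	struct rtattr *linkinfo;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = RTM_NEWLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL |
			      NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;

	add_rta(&req.nlh, IFLA_IFNAME, name, strlen(name) + 1);
	linkinfo = nest_start(&req.nlh, IFLA_LINKINFO);
	add_rta(&req.nlh, IFLA_INFO_KIND, "dummy", sizeof("dummy"));
	nest_end(&req.nlh, linkinfo);

	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
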
3597
3598 static int rtnl_valid_getlink_req(struct sk_buff *skb,
3599 const struct nlmsghdr *nlh,
3600 struct nlattr **tb,
3601 struct netlink_ext_ack *extack)
3602 {
3603 struct ifinfomsg *ifm;
3604 int i, err;
3605
3606 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
3607 NL_SET_ERR_MSG(extack, "Invalid header for get link");
3608 return -EINVAL;
3609 }
3610
3611 if (!netlink_strict_get_check(skb))
3612 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
3613 ifla_policy, extack);
3614
3615 ifm = nlmsg_data(nlh);
3616 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
3617 ifm->ifi_change) {
3618 NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
3619 return -EINVAL;
3620 }
3621
3622 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
3623 ifla_policy, extack);
3624 if (err)
3625 return err;
3626
3627 for (i = 0; i <= IFLA_MAX; i++) {
3628 if (!tb[i])
3629 continue;
3630
3631 switch (i) {
3632 case IFLA_IFNAME:
3633 case IFLA_ALT_IFNAME:
3634 case IFLA_EXT_MASK:
3635 case IFLA_TARGET_NETNSID:
3636 break;
3637 default:
3638 NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
3639 return -EINVAL;
3640 }
3641 }
3642
3643 return 0;
3644 }
3645
3646 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3647 struct netlink_ext_ack *extack)
3648 {
3649 struct net *net = sock_net(skb->sk);
3650 struct net *tgt_net = net;
3651 struct ifinfomsg *ifm;
3652 struct nlattr *tb[IFLA_MAX+1];
3653 struct net_device *dev = NULL;
3654 struct sk_buff *nskb;
3655 int netnsid = -1;
3656 int err;
3657 u32 ext_filter_mask = 0;
3658
3659 err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
3660 if (err < 0)
3661 return err;
3662
3663 err = rtnl_ensure_unique_netns(tb, extack, true);
3664 if (err < 0)
3665 return err;
3666
3667 if (tb[IFLA_TARGET_NETNSID]) {
3668 netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
3669 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
3670 if (IS_ERR(tgt_net))
3671 return PTR_ERR(tgt_net);
3672 }
3673
3674 if (tb[IFLA_EXT_MASK])
3675 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3676
3677 err = -EINVAL;
3678 ifm = nlmsg_data(nlh);
3679 if (ifm->ifi_index > 0)
3680 dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
3681 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3682 dev = rtnl_dev_get(tgt_net, tb);
3683 else
3684 goto out;
3685
3686 err = -ENODEV;
3687 if (dev == NULL)
3688 goto out;
3689
3690 err = -ENOBUFS;
3691 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
3692 if (nskb == NULL)
3693 goto out;
3694
3695 err = rtnl_fill_ifinfo(nskb, dev, net,
3696 RTM_NEWLINK, NETLINK_CB(skb).portid,
3697 nlh->nlmsg_seq, 0, 0, ext_filter_mask,
3698 0, NULL, 0, netnsid, GFP_KERNEL);
3699 if (err < 0) {
3700 /* -EMSGSIZE implies BUG in if_nlmsg_size */
3701 WARN_ON(err == -EMSGSIZE);
3702 kfree_skb(nskb);
3703 } else
3704 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
3705 out:
3706 if (netnsid >= 0)
3707 put_net(tgt_net);
3708
3709 return err;
3710 }
3711
3712 static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
3713 bool *changed, struct netlink_ext_ack *extack)
3714 {
3715 char *alt_ifname;
3716 size_t size;
3717 int err;
3718
3719 err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
3720 if (err)
3721 return err;
3722
3723 if (cmd == RTM_NEWLINKPROP) {
3724 size = rtnl_prop_list_size(dev);
3725 size += nla_total_size(ALTIFNAMSIZ);
3726 if (size >= U16_MAX) {
3727 NL_SET_ERR_MSG(extack,
3728 "effective property list too long");
3729 return -EINVAL;
3730 }
3731 }
3732
3733 alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
3734 if (!alt_ifname)
3735 return -ENOMEM;
3736
3737 if (cmd == RTM_NEWLINKPROP) {
3738 err = netdev_name_node_alt_create(dev, alt_ifname);
3739 if (!err)
3740 alt_ifname = NULL;
3741 } else if (cmd == RTM_DELLINKPROP) {
3742 err = netdev_name_node_alt_destroy(dev, alt_ifname);
3743 } else {
3744 WARN_ON_ONCE(1);
3745 err = -EINVAL;
3746 }
3747
3748 kfree(alt_ifname);
3749 if (!err)
3750 *changed = true;
3751 return err;
3752 }
3753
3754 static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
3755 struct netlink_ext_ack *extack)
3756 {
3757 struct net *net = sock_net(skb->sk);
3758 struct nlattr *tb[IFLA_MAX + 1];
3759 struct net_device *dev;
3760 struct ifinfomsg *ifm;
3761 bool changed = false;
3762 struct nlattr *attr;
3763 int err, rem;
3764
3765 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
3766 if (err)
3767 return err;
3768
3769 err = rtnl_ensure_unique_netns(tb, extack, true);
3770 if (err)
3771 return err;
3772
3773 ifm = nlmsg_data(nlh);
3774 if (ifm->ifi_index > 0)
3775 dev = __dev_get_by_index(net, ifm->ifi_index);
3776 else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
3777 dev = rtnl_dev_get(net, tb);
3778 else
3779 return -EINVAL;
3780
3781 if (!dev)
3782 return -ENODEV;
3783
3784 if (!tb[IFLA_PROP_LIST])
3785 return 0;
3786
3787 nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
3788 switch (nla_type(attr)) {
3789 case IFLA_ALT_IFNAME:
3790 err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
3791 if (err)
3792 return err;
3793 break;
3794 }
3795 }
3796
3797 if (changed)
3798 netdev_state_change(dev);
3799 return 0;
3800 }
3801
3802 static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3803 struct netlink_ext_ack *extack)
3804 {
3805 return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
3806 }
3807
3808 static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
3809 struct netlink_ext_ack *extack)
3810 {
3811 return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
3812 }
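
Alternative names travel as IFLA_ALT_IFNAME entries inside an IFLA_PROP_LIST
nest, and the same payload serves both RTM_NEWLINKPROP and RTM_DELLINKPROP. A
sketch with the earlier helpers; note that rtnl_linkprop() parses strictly, so
the nest should carry the NLA_F_NESTED flag:

static int change_altname(int fd, int cmd, /* RTM_{NEW,DEL}LINKPROP */
			  int ifindex, const char *altname)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char buf[256];
	} req;
	struct rtattr *props;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = cmd;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = ifindex;

	props = nest_start(&req.nlh, IFLA_PROP_LIST | NLA_F_NESTED);
	add_rta(&req.nlh, IFLA_ALT_IFNAME, altname, strlen(altname) + 1);
	nest_end(&req.nlh, props);

	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
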
3813
3814 static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
3815 {
3816 struct net *net = sock_net(skb->sk);
3817 size_t min_ifinfo_dump_size = 0;
3818 struct nlattr *tb[IFLA_MAX+1];
3819 u32 ext_filter_mask = 0;
3820 struct net_device *dev;
3821 int hdrlen;
3822
3823 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
3824 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
3825 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
3826
3827 if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
3828 if (tb[IFLA_EXT_MASK])
3829 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
3830 }
3831
3832 if (!ext_filter_mask)
3833 return NLMSG_GOODSIZE;
3834 /*
3835  * traverse the list of net devices and compute the minimum
3836  * buffer size based upon the filter mask.
3837  */
3838 rcu_read_lock();
3839 for_each_netdev_rcu(net, dev) {
3840 min_ifinfo_dump_size = max(min_ifinfo_dump_size,
3841 if_nlmsg_size(dev, ext_filter_mask));
3842 }
3843 rcu_read_unlock();
3844
3845 return nlmsg_total_size(min_ifinfo_dump_size);
3846 }
3847
3848 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
3849 {
3850 int idx;
3851 int s_idx = cb->family;
3852 int type = cb->nlh->nlmsg_type - RTM_BASE;
3853 int ret = 0;
3854
3855 if (s_idx == 0)
3856 s_idx = 1;
3857
3858 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
3859 struct rtnl_link __rcu **tab;
3860 struct rtnl_link *link;
3861 rtnl_dumpit_func dumpit;
3862
3863 if (idx < s_idx || idx == PF_PACKET)
3864 continue;
3865
3866 if (type < 0 || type >= RTM_NR_MSGTYPES)
3867 continue;
3868
3869 tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
3870 if (!tab)
3871 continue;
3872
3873 link = rcu_dereference_rtnl(tab[type]);
3874 if (!link)
3875 continue;
3876
3877 dumpit = link->dumpit;
3878 if (!dumpit)
3879 continue;
3880
3881 if (idx > s_idx) {
3882 memset(&cb->args[0], 0, sizeof(cb->args));
3883 cb->prev_seq = 0;
3884 cb->seq = 0;
3885 }
3886 ret = dumpit(skb, cb);
3887 if (ret)
3888 break;
3889 }
3890 cb->family = idx;
3891
3892 return skb->len ? : ret;
3893 }
3894
3895 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
3896 unsigned int change,
3897 u32 event, gfp_t flags, int *new_nsid,
3898 int new_ifindex)
3899 {
3900 struct net *net = dev_net(dev);
3901 struct sk_buff *skb;
3902 int err = -ENOBUFS;
3903
3904 skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
3905 if (skb == NULL)
3906 goto errout;
3907
3908 err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
3909 type, 0, 0, change, 0, 0, event,
3910 new_nsid, new_ifindex, -1, flags);
3911 if (err < 0) {
3912 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
3913 WARN_ON(err == -EMSGSIZE);
3914 kfree_skb(skb);
3915 goto errout;
3916 }
3917 return skb;
3918 errout:
3919 if (err < 0)
3920 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3921 return NULL;
3922 }
3923
3924 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
3925 {
3926 struct net *net = dev_net(dev);
3927
3928 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
3929 }
3930
3931 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
3932 unsigned int change, u32 event,
3933 gfp_t flags, int *new_nsid, int new_ifindex)
3934 {
3935 struct sk_buff *skb;
3936
3937 if (dev->reg_state != NETREG_REGISTERED)
3938 return;
3939
3940 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
3941 new_ifindex);
3942 if (skb)
3943 rtmsg_ifinfo_send(skb, dev, flags);
3944 }
3945
3946 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3947 gfp_t flags)
3948 {
3949 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3950 NULL, 0);
3951 }
3952
3953 void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
3954 gfp_t flags, int *new_nsid, int new_ifindex)
3955 {
3956 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
3957 new_nsid, new_ifindex);
3958 }
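
The notifications built above go to the RTNLGRP_LINK multicast group; a
monitor subscribes by binding with the legacy RTMGRP_LINK bit, which is what
"ip monitor link" does. A minimal, self-contained sketch:

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_link_monitor(void)
{
	struct sockaddr_nl sa;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_LINK;	/* bitmask form of RTNLGRP_LINK */

	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* recv() now yields RTM_NEWLINK/RTM_DELLINK messages */
}
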
3959
3960 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
3961 struct net_device *dev,
3962 u8 *addr, u16 vid, u32 pid, u32 seq,
3963 int type, unsigned int flags,
3964 int nlflags, u16 ndm_state)
3965 {
3966 struct nlmsghdr *nlh;
3967 struct ndmsg *ndm;
3968
3969 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
3970 if (!nlh)
3971 return -EMSGSIZE;
3972
3973 ndm = nlmsg_data(nlh);
3974 ndm->ndm_family = AF_BRIDGE;
3975 ndm->ndm_pad1 = 0;
3976 ndm->ndm_pad2 = 0;
3977 ndm->ndm_flags = flags;
3978 ndm->ndm_type = 0;
3979 ndm->ndm_ifindex = dev->ifindex;
3980 ndm->ndm_state = ndm_state;
3981
3982 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3983 goto nla_put_failure;
3984 if (vid)
3985 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
3986 goto nla_put_failure;
3987
3988 nlmsg_end(skb, nlh);
3989 return 0;
3990
3991 nla_put_failure:
3992 nlmsg_cancel(skb, nlh);
3993 return -EMSGSIZE;
3994 }
3995
3996 static inline size_t rtnl_fdb_nlmsg_size(void)
3997 {
3998 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
3999 nla_total_size(ETH_ALEN) +
4000 nla_total_size(sizeof(u16)) +
4001 0;
4002 }
4003
4004 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
4005 u16 ndm_state)
4006 {
4007 struct net *net = dev_net(dev);
4008 struct sk_buff *skb;
4009 int err = -ENOBUFS;
4010
4011 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
4012 if (!skb)
4013 goto errout;
4014
4015 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
4016 0, 0, type, NTF_SELF, 0, ndm_state);
4017 if (err < 0) {
4018 kfree_skb(skb);
4019 goto errout;
4020 }
4021
4022 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
4023 return;
4024 errout:
4025 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
4026 }
4027
4028 /*
4029  * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
4030  */
4031 int ndo_dflt_fdb_add(struct ndmsg *ndm,
4032 struct nlattr *tb[],
4033 struct net_device *dev,
4034 const unsigned char *addr, u16 vid,
4035 u16 flags)
4036 {
4037 int err = -EINVAL;
4038
4039 /* If aging addresses are supported device will need
4040  * to implement its own handler for this.
4041  */
4042 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
4043 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4044 return err;
4045 }
4046
4047 if (vid) {
4048 netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
4049 return err;
4050 }
4051
4052 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4053 err = dev_uc_add_excl(dev, addr);
4054 else if (is_multicast_ether_addr(addr))
4055 err = dev_mc_add_excl(dev, addr);
4056
4057 /* Only return duplicate errors if NLM_F_EXCL is set */
4058 if (err == -EEXIST && !(flags & NLM_F_EXCL))
4059 err = 0;
4060
4061 return err;
4062 }
4063 EXPORT_SYMBOL(ndo_dflt_fdb_add);
4064
4065 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
4066 struct netlink_ext_ack *extack)
4067 {
4068 u16 vid = 0;
4069
4070 if (vlan_attr) {
4071 if (nla_len(vlan_attr) != sizeof(u16)) {
4072 NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
4073 return -EINVAL;
4074 }
4075
4076 vid = nla_get_u16(vlan_attr);
4077
4078 if (!vid || vid >= VLAN_VID_MASK) {
4079 NL_SET_ERR_MSG(extack, "invalid vlan id");
4080 return -EINVAL;
4081 }
4082 }
4083 *p_vid = vid;
4084 return 0;
4085 }
4086
4087 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
4088 struct netlink_ext_ack *extack)
4089 {
4090 struct net *net = sock_net(skb->sk);
4091 struct ndmsg *ndm;
4092 struct nlattr *tb[NDA_MAX+1];
4093 struct net_device *dev;
4094 u8 *addr;
4095 u16 vid;
4096 int err;
4097
4098 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
4099 extack);
4100 if (err < 0)
4101 return err;
4102
4103 ndm = nlmsg_data(nlh);
4104 if (ndm->ndm_ifindex == 0) {
4105 NL_SET_ERR_MSG(extack, "invalid ifindex");
4106 return -EINVAL;
4107 }
4108
4109 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4110 if (dev == NULL) {
4111 NL_SET_ERR_MSG(extack, "unknown ifindex");
4112 return -ENODEV;
4113 }
4114
4115 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4116 NL_SET_ERR_MSG(extack, "invalid address");
4117 return -EINVAL;
4118 }
4119
4120 if (dev->type != ARPHRD_ETHER) {
4121 NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
4122 return -EINVAL;
4123 }
4124
4125 addr = nla_data(tb[NDA_LLADDR]);
4126
4127 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4128 if (err)
4129 return err;
4130
4131 err = -EOPNOTSUPP;
4132
4133 /* Support fdb on master device the net/bridge default case */
4134 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4135 netif_is_bridge_port(dev)) {
4136 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4137 const struct net_device_ops *ops = br_dev->netdev_ops;
4138
4139 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
4140 nlh->nlmsg_flags, extack);
4141 if (err)
4142 goto out;
4143 else
4144 ndm->ndm_flags &= ~NTF_MASTER;
4145 }
4146
4147 /* Embedded bridge, macvlan, and any other device support */
4148 if ((ndm->ndm_flags & NTF_SELF)) {
4149 if (dev->netdev_ops->ndo_fdb_add)
4150 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
4151 vid,
4152 nlh->nlmsg_flags,
4153 extack);
4154 else
4155 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
4156 nlh->nlmsg_flags);
4157
4158 if (!err) {
4159 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
4160 ndm->ndm_state);
4161 ndm->ndm_flags &= ~NTF_SELF;
4162 }
4163 }
4164 out:
4165 return err;
4166 }
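
A "bridge fdb add <mac> dev <port> self" request corresponds to RTM_NEWNEIGH
with an AF_BRIDGE ndmsg and an NDA_LLADDR attribute. Sketch, reusing add_rta()
and assuming <linux/neighbour.h> for the NDA_*/NTF_*/NUD_* definitions:

static int fdb_add(int fd, int ifindex, const unsigned char mac[6])
{
	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
		char buf[32];
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ndm));
	req.nlh.nlmsg_type = RTM_NEWNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL |
			      NLM_F_ACK;
	req.ndm.ndm_family = AF_BRIDGE;
	req.ndm.ndm_ifindex = ifindex;
	req.ndm.ndm_state = NUD_PERMANENT;	/* static entry */
	req.ndm.ndm_flags = NTF_SELF;		/* device-side table, see above */

	add_rta(&req.nlh, NDA_LLADDR, mac, 6);
	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
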
4167
4168 /*
4169  * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
4170  */
4171 int ndo_dflt_fdb_del(struct ndmsg *ndm,
4172 struct nlattr *tb[],
4173 struct net_device *dev,
4174 const unsigned char *addr, u16 vid)
4175 {
4176 int err = -EINVAL;
4177
4178 /* If aging addresses are supported device will need
4179  * to implement its own handler for this.
4180  */
4181 if (!(ndm->ndm_state & NUD_PERMANENT)) {
4182 netdev_info(dev, "default FDB implementation only supports local addresses\n");
4183 return err;
4184 }
4185
4186 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
4187 err = dev_uc_del(dev, addr);
4188 else if (is_multicast_ether_addr(addr))
4189 err = dev_mc_del(dev, addr);
4190
4191 return err;
4192 }
4193 EXPORT_SYMBOL(ndo_dflt_fdb_del);
4194
4195 static const struct nla_policy fdb_del_bulk_policy[NDA_MAX + 1] = {
4196 [NDA_VLAN] = { .type = NLA_U16 },
4197 [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
4198 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
4199 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
4200 };
4201
4202 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
4203 struct netlink_ext_ack *extack)
4204 {
4205 bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
4206 struct net *net = sock_net(skb->sk);
4207 const struct net_device_ops *ops;
4208 struct ndmsg *ndm;
4209 struct nlattr *tb[NDA_MAX+1];
4210 struct net_device *dev;
4211 __u8 *addr = NULL;
4212 int err;
4213 u16 vid;
4214
4215 if (!netlink_capable(skb, CAP_NET_ADMIN))
4216 return -EPERM;
4217
4218 if (!del_bulk) {
4219 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
4220 NULL, extack);
4221 } else {
4222 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
4223 fdb_del_bulk_policy, extack);
4224 }
4225 if (err < 0)
4226 return err;
4227
4228 ndm = nlmsg_data(nlh);
4229 if (ndm->ndm_ifindex == 0) {
4230 NL_SET_ERR_MSG(extack, "invalid ifindex");
4231 return -EINVAL;
4232 }
4233
4234 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
4235 if (dev == NULL) {
4236 NL_SET_ERR_MSG(extack, "unknown ifindex");
4237 return -ENODEV;
4238 }
4239
4240 if (!del_bulk) {
4241 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
4242 NL_SET_ERR_MSG(extack, "invalid address");
4243 return -EINVAL;
4244 }
4245 addr = nla_data(tb[NDA_LLADDR]);
4246 }
4247
4248 if (dev->type != ARPHRD_ETHER) {
4249 NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
4250 return -EINVAL;
4251 }
4252
4253 err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
4254 if (err)
4255 return err;
4256
4257 err = -EOPNOTSUPP;
4258
4259 /* Support fdb on master device the net/bridge default case */
4260 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
4261 netif_is_bridge_port(dev)) {
4262 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4263
4264 ops = br_dev->netdev_ops;
4265 if (!del_bulk) {
4266 if (ops->ndo_fdb_del)
4267 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4268 } else {
4269 if (ops->ndo_fdb_del_bulk)
4270 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4271 extack);
4272 }
4273
4274 if (err)
4275 goto out;
4276 else
4277 ndm->ndm_flags &= ~NTF_MASTER;
4278 }
4279
4280 /* Embedded bridge, macvlan, and any other device support */
4281 if (ndm->ndm_flags & NTF_SELF) {
4282 ops = dev->netdev_ops;
4283 if (!del_bulk) {
4284 if (ops->ndo_fdb_del)
4285 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
4286 else
4287 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
4288 } else {
4289 /* in case err was cleared by NTF_MASTER call */
4290 err = -EOPNOTSUPP;
4291 if (ops->ndo_fdb_del_bulk)
4292 err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid,
4293 extack);
4294 }
4295
4296 if (!err) {
4297 if (!del_bulk)
4298 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
4299 ndm->ndm_state);
4300 ndm->ndm_flags &= ~NTF_SELF;
4301 }
4302 }
4303 out:
4304 return err;
4305 }
4306
4307 static int nlmsg_populate_fdb(struct sk_buff *skb,
4308 struct netlink_callback *cb,
4309 struct net_device *dev,
4310 int *idx,
4311 struct netdev_hw_addr_list *list)
4312 {
4313 struct netdev_hw_addr *ha;
4314 int err;
4315 u32 portid, seq;
4316
4317 portid = NETLINK_CB(cb->skb).portid;
4318 seq = cb->nlh->nlmsg_seq;
4319
4320 list_for_each_entry(ha, &list->list, list) {
4321 if (*idx < cb->args[2])
4322 goto skip;
4323
4324 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
4325 portid, seq,
4326 RTM_NEWNEIGH, NTF_SELF,
4327 NLM_F_MULTI, NUD_PERMANENT);
4328 if (err < 0)
4329 return err;
4330 skip:
4331 *idx += 1;
4332 }
4333 return 0;
4334 }
4335
4336 /**
4337  * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
4338  * @skb: socket buffer to store message in
4339  * @cb: netlink callback
4340  * @dev: netdevice
4341  * @filter_dev: ignored
4342  * @idx: the number of FDB table entries dumped is added to *idx
4343  *
4344  * Default netdevice operation to dump the existing unicast address list.
4345  * Returns number of addresses from list put in skb.
4346  */
4347 int ndo_dflt_fdb_dump(struct sk_buff *skb,
4348 struct netlink_callback *cb,
4349 struct net_device *dev,
4350 struct net_device *filter_dev,
4351 int *idx)
4352 {
4353 int err;
4354
4355 if (dev->type != ARPHRD_ETHER)
4356 return -EINVAL;
4357
4358 netif_addr_lock_bh(dev);
4359 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
4360 if (err)
4361 goto out;
4362 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
4363 out:
4364 netif_addr_unlock_bh(dev);
4365 return err;
4366 }
4367 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
4368
4369 static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
4370 int *br_idx, int *brport_idx,
4371 struct netlink_ext_ack *extack)
4372 {
4373 struct nlattr *tb[NDA_MAX + 1];
4374 struct ndmsg *ndm;
4375 int err, i;
4376
4377 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4378 NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
4379 return -EINVAL;
4380 }
4381
4382 ndm = nlmsg_data(nlh);
4383 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4384 ndm->ndm_flags || ndm->ndm_type) {
4385 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
4386 return -EINVAL;
4387 }
4388
4389 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4390 NDA_MAX, NULL, extack);
4391 if (err < 0)
4392 return err;
4393
4394 *brport_idx = ndm->ndm_ifindex;
4395 for (i = 0; i <= NDA_MAX; ++i) {
4396 if (!tb[i])
4397 continue;
4398
4399 switch (i) {
4400 case NDA_IFINDEX:
4401 if (nla_len(tb[i]) != sizeof(u32)) {
4402 NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
4403 return -EINVAL;
4404 }
4405 *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
4406 break;
4407 case NDA_MASTER:
4408 if (nla_len(tb[i]) != sizeof(u32)) {
4409 NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
4410 return -EINVAL;
4411 }
4412 *br_idx = nla_get_u32(tb[NDA_MASTER]);
4413 break;
4414 default:
4415 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
4416 return -EINVAL;
4417 }
4418 }
4419
4420 return 0;
4421 }
4422
4423 static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
4424 int *br_idx, int *brport_idx,
4425 struct netlink_ext_ack *extack)
4426 {
4427 struct nlattr *tb[IFLA_MAX+1];
4428 int err;
4429
4430 /* A hack to preserve kernel<->userspace interface.
4431 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
4432 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
4433 * So, check for ndmsg with an optional u32 attribute (not used here).
4434 * Fortunately these sizes don't conflict with the size of ifinfomsg
4435 * with an optional attribute.
4436 */
4437 if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
4438 (nlmsg_len(nlh) != sizeof(struct ndmsg) +
4439 nla_attr_size(sizeof(u32)))) {
4440 struct ifinfomsg *ifm;
4441
4442 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4443 tb, IFLA_MAX, ifla_policy,
4444 extack);
4445 if (err < 0) {
4446 return -EINVAL;
4447 } else if (err == 0) {
4448 if (tb[IFLA_MASTER])
4449 *br_idx = nla_get_u32(tb[IFLA_MASTER]);
4450 }
4451
4452 ifm = nlmsg_data(nlh);
4453 *brport_idx = ifm->ifi_index;
4454 }
4455 return 0;
4456 }
4457
4458 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
4459 {
4460 struct net_device *dev;
4461 struct net_device *br_dev = NULL;
4462 const struct net_device_ops *ops = NULL;
4463 const struct net_device_ops *cops = NULL;
4464 struct net *net = sock_net(skb->sk);
4465 struct hlist_head *head;
4466 int brport_idx = 0;
4467 int br_idx = 0;
4468 int h, s_h;
4469 int idx = 0, s_idx;
4470 int err = 0;
4471 int fidx = 0;
4472
4473 if (cb->strict_check)
4474 err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
4475 cb->extack);
4476 else
4477 err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
4478 cb->extack);
4479 if (err < 0)
4480 return err;
4481
4482 if (br_idx) {
4483 br_dev = __dev_get_by_index(net, br_idx);
4484 if (!br_dev)
4485 return -ENODEV;
4486
4487 ops = br_dev->netdev_ops;
4488 }
4489
4490 s_h = cb->args[0];
4491 s_idx = cb->args[1];
4492
4493 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4494 idx = 0;
4495 head = &net->dev_index_head[h];
4496 hlist_for_each_entry(dev, head, index_hlist) {
4497
4498 if (brport_idx && (dev->ifindex != brport_idx))
4499 continue;
4500
4501 if (!br_idx) {
4502 if (netif_is_bridge_port(dev)) {
4503 br_dev = netdev_master_upper_dev_get(dev);
4504 cops = br_dev->netdev_ops;
4505 }
4506 } else {
4507 if (dev != br_dev &&
4508 !netif_is_bridge_port(dev))
4509 continue;
4510
4511 if (br_dev != netdev_master_upper_dev_get(dev) &&
4512 !netif_is_bridge_master(dev))
4513 continue;
4514 cops = ops;
4515 }
4516
4517 if (idx < s_idx)
4518 goto cont;
4519
4520 if (netif_is_bridge_port(dev)) {
4521 if (cops && cops->ndo_fdb_dump) {
4522 err = cops->ndo_fdb_dump(skb, cb,
4523 br_dev, dev,
4524 &fidx);
4525 if (err == -EMSGSIZE)
4526 goto out;
4527 }
4528 }
4529
4530 if (dev->netdev_ops->ndo_fdb_dump)
4531 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
4532 dev, NULL,
4533 &fidx);
4534 else
4535 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
4536 &fidx);
4537 if (err == -EMSGSIZE)
4538 goto out;
4539
4540 cops = NULL;
4541
4542 /* reset fdb offset to 0 for rest of the devices */
4543 cb->args[2] = 0;
4544 fidx = 0;
4545 cont:
4546 idx++;
4547 }
4548 }
4549
4550 out:
4551 cb->args[0] = h;
4552 cb->args[1] = idx;
4553 cb->args[2] = fidx;
4554
4555 return skb->len;
4556 }
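/* Informational: dump state kept between netlink callbacks for the FDB
 * dump above: cb->args[0] is the device hash bucket (h), cb->args[1] the
 * device index within the bucket (idx), and cb->args[2] the FDB entry
 * offset inside the current device (fidx).
 */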
4557
4558 static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
4559 struct nlattr **tb, u8 *ndm_flags,
4560 int *br_idx, int *brport_idx, u8 **addr,
4561 u16 *vid, struct netlink_ext_ack *extack)
4562 {
4563 struct ndmsg *ndm;
4564 int err, i;
4565
4566 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
4567 NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
4568 return -EINVAL;
4569 }
4570
4571 ndm = nlmsg_data(nlh);
4572 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
4573 ndm->ndm_type) {
4574 NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
4575 return -EINVAL;
4576 }
4577
4578 if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
4579 NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
4580 return -EINVAL;
4581 }
4582
4583 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
4584 NDA_MAX, nda_policy, extack);
4585 if (err < 0)
4586 return err;
4587
4588 *ndm_flags = ndm->ndm_flags;
4589 *brport_idx = ndm->ndm_ifindex;
4590 for (i = 0; i <= NDA_MAX; ++i) {
4591 if (!tb[i])
4592 continue;
4593
4594 switch (i) {
4595 case NDA_MASTER:
4596 *br_idx = nla_get_u32(tb[i]);
4597 break;
4598 case NDA_LLADDR:
4599 if (nla_len(tb[i]) != ETH_ALEN) {
4600 NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
4601 return -EINVAL;
4602 }
4603 *addr = nla_data(tb[i]);
4604 break;
4605 case NDA_VLAN:
4606 err = fdb_vid_parse(tb[i], vid, extack);
4607 if (err)
4608 return err;
4609 break;
4610 case NDA_VNI:
4611 break;
4612 default:
4613 NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
4614 return -EINVAL;
4615 }
4616 }
4617
4618 return 0;
4619 }
4620
4621 static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4622 struct netlink_ext_ack *extack)
4623 {
4624 struct net_device *dev = NULL, *br_dev = NULL;
4625 const struct net_device_ops *ops = NULL;
4626 struct net *net = sock_net(in_skb->sk);
4627 struct nlattr *tb[NDA_MAX + 1];
4628 struct sk_buff *skb;
4629 int brport_idx = 0;
4630 u8 ndm_flags = 0;
4631 int br_idx = 0;
4632 u8 *addr = NULL;
4633 u16 vid = 0;
4634 int err;
4635
4636 err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
4637 &brport_idx, &addr, &vid, extack);
4638 if (err < 0)
4639 return err;
4640
4641 if (!addr) {
4642 NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
4643 return -EINVAL;
4644 }
4645
4646 if (brport_idx) {
4647 dev = __dev_get_by_index(net, brport_idx);
4648 if (!dev) {
4649 NL_SET_ERR_MSG(extack, "Unknown device ifindex");
4650 return -ENODEV;
4651 }
4652 }
4653
4654 if (br_idx) {
4655 if (dev) {
4656 NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
4657 return -EINVAL;
4658 }
4659
4660 br_dev = __dev_get_by_index(net, br_idx);
4661 if (!br_dev) {
4662 NL_SET_ERR_MSG(extack, "Invalid master ifindex");
4663 return -EINVAL;
4664 }
4665 ops = br_dev->netdev_ops;
4666 }
4667
4668 if (dev) {
4669 if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
4670 if (!netif_is_bridge_port(dev)) {
4671 NL_SET_ERR_MSG(extack, "Device is not a bridge port");
4672 return -EINVAL;
4673 }
4674 br_dev = netdev_master_upper_dev_get(dev);
4675 if (!br_dev) {
4676 NL_SET_ERR_MSG(extack, "Master of device not found");
4677 return -EINVAL;
4678 }
4679 ops = br_dev->netdev_ops;
4680 } else {
4681 if (!(ndm_flags & NTF_SELF)) {
4682 NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
4683 return -EINVAL;
4684 }
4685 ops = dev->netdev_ops;
4686 }
4687 }
4688
4689 if (!br_dev && !dev) {
4690 NL_SET_ERR_MSG(extack, "No device specified");
4691 return -ENODEV;
4692 }
4693
4694 if (!ops || !ops->ndo_fdb_get) {
4695 NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
4696 return -EOPNOTSUPP;
4697 }
4698
4699 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4700 if (!skb)
4701 return -ENOBUFS;
4702
4703 if (br_dev)
4704 dev = br_dev;
4705 err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
4706 NETLINK_CB(in_skb).portid,
4707 nlh->nlmsg_seq, extack);
4708 if (err)
4709 goto out;
4710
4711 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4712 out:
4713 kfree_skb(skb);
4714 return err;
4715 }
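/* Illustrative usage (iproute2, recent versions): a single-entry lookup
 *
 *   bridge fdb get 00:11:22:33:44:55 dev eth0 master
 *
 * sends RTM_GETNEIGH with NDA_LLADDR and NTF_MASTER, which the handler
 * above resolves to the bridge master's ndo_fdb_get().
 */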
4716
4717 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
4718 unsigned int attrnum, unsigned int flag)
4719 {
4720 if (mask & flag)
4721 return nla_put_u8(skb, attrnum, !!(flags & flag));
4722 return 0;
4723 }
4724
4725 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4726 struct net_device *dev, u16 mode,
4727 u32 flags, u32 mask, int nlflags,
4728 u32 filter_mask,
4729 int (*vlan_fill)(struct sk_buff *skb,
4730 struct net_device *dev,
4731 u32 filter_mask))
4732 {
4733 struct nlmsghdr *nlh;
4734 struct ifinfomsg *ifm;
4735 struct nlattr *br_afspec;
4736 struct nlattr *protinfo;
4737 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
4738 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4739 int err = 0;
4740
4741 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
4742 if (nlh == NULL)
4743 return -EMSGSIZE;
4744
4745 ifm = nlmsg_data(nlh);
4746 ifm->ifi_family = AF_BRIDGE;
4747 ifm->__ifi_pad = 0;
4748 ifm->ifi_type = dev->type;
4749 ifm->ifi_index = dev->ifindex;
4750 ifm->ifi_flags = dev_get_flags(dev);
4751 ifm->ifi_change = 0;
4752
4753
4754 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4755 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4756 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
4757 (br_dev &&
4758 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
4759 (dev->addr_len &&
4760 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4761 (dev->ifindex != dev_get_iflink(dev) &&
4762 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4763 goto nla_put_failure;
4764
4765 br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
4766 if (!br_afspec)
4767 goto nla_put_failure;
4768
4769 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
4770 nla_nest_cancel(skb, br_afspec);
4771 goto nla_put_failure;
4772 }
4773
4774 if (mode != BRIDGE_MODE_UNDEF) {
4775 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
4776 nla_nest_cancel(skb, br_afspec);
4777 goto nla_put_failure;
4778 }
4779 }
4780 if (vlan_fill) {
4781 err = vlan_fill(skb, dev, filter_mask);
4782 if (err) {
4783 nla_nest_cancel(skb, br_afspec);
4784 goto nla_put_failure;
4785 }
4786 }
4787 nla_nest_end(skb, br_afspec);
4788
4789 protinfo = nla_nest_start(skb, IFLA_PROTINFO);
4790 if (!protinfo)
4791 goto nla_put_failure;
4792
4793 if (brport_nla_put_flag(skb, flags, mask,
4794 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
4795 brport_nla_put_flag(skb, flags, mask,
4796 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
4797 brport_nla_put_flag(skb, flags, mask,
4798 IFLA_BRPORT_FAST_LEAVE,
4799 BR_MULTICAST_FAST_LEAVE) ||
4800 brport_nla_put_flag(skb, flags, mask,
4801 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
4802 brport_nla_put_flag(skb, flags, mask,
4803 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
4804 brport_nla_put_flag(skb, flags, mask,
4805 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
4806 brport_nla_put_flag(skb, flags, mask,
4807 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
4808 brport_nla_put_flag(skb, flags, mask,
4809 IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
4810 brport_nla_put_flag(skb, flags, mask,
4811 IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
4812 brport_nla_put_flag(skb, flags, mask,
4813 IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
4814 nla_nest_cancel(skb, protinfo);
4815 goto nla_put_failure;
4816 }
4817
4818 nla_nest_end(skb, protinfo);
4819
4820 nlmsg_end(skb, nlh);
4821 return 0;
4822 nla_put_failure:
4823 nlmsg_cancel(skb, nlh);
4824 return err ? err : -EMSGSIZE;
4825 }
4826 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
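/* A minimal sketch of how a driver commonly wraps the default helper
 * above (hypothetical foo_* names, VEB mode assumed; example only):
 */
#if 0	/* example only, not built */
static int foo_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
#endif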
4827
4828 static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
4829 bool strict_check, u32 *filter_mask,
4830 struct netlink_ext_ack *extack)
4831 {
4832 struct nlattr *tb[IFLA_MAX+1];
4833 int err, i;
4834
4835 if (strict_check) {
4836 struct ifinfomsg *ifm;
4837
4838 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
4839 NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
4840 return -EINVAL;
4841 }
4842
4843 ifm = nlmsg_data(nlh);
4844 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
4845 ifm->ifi_change || ifm->ifi_index) {
4846 NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
4847 return -EINVAL;
4848 }
4849
4850 err = nlmsg_parse_deprecated_strict(nlh,
4851 sizeof(struct ifinfomsg),
4852 tb, IFLA_MAX, ifla_policy,
4853 extack);
4854 } else {
4855 err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
4856 tb, IFLA_MAX, ifla_policy,
4857 extack);
4858 }
4859 if (err < 0)
4860 return err;
4861
4862 /* new attributes should only be added with strict checking */
4863 for (i = 0; i <= IFLA_MAX; ++i) {
4864 if (!tb[i])
4865 continue;
4866
4867 switch (i) {
4868 case IFLA_EXT_MASK:
4869 *filter_mask = nla_get_u32(tb[i]);
4870 break;
4871 default:
4872 if (strict_check) {
4873 NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
4874 return -EINVAL;
4875 }
4876 }
4877 }
4878
4879 return 0;
4880 }
4881
4882 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
4883 {
4884 const struct nlmsghdr *nlh = cb->nlh;
4885 struct net *net = sock_net(skb->sk);
4886 struct net_device *dev;
4887 int idx = 0;
4888 u32 portid = NETLINK_CB(cb->skb).portid;
4889 u32 seq = nlh->nlmsg_seq;
4890 u32 filter_mask = 0;
4891 int err;
4892
4893 err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
4894 cb->extack);
4895 if (err < 0 && cb->strict_check)
4896 return err;
4897
4898 rcu_read_lock();
4899 for_each_netdev_rcu(net, dev) {
4900 const struct net_device_ops *ops = dev->netdev_ops;
4901 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
4902
4903 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
4904 if (idx >= cb->args[0]) {
4905 err = br_dev->netdev_ops->ndo_bridge_getlink(
4906 skb, portid, seq, dev,
4907 filter_mask, NLM_F_MULTI);
4908 if (err < 0 && err != -EOPNOTSUPP) {
4909 if (likely(skb->len))
4910 break;
4911
4912 goto out_err;
4913 }
4914 }
4915 idx++;
4916 }
4917
4918 if (ops->ndo_bridge_getlink) {
4919 if (idx >= cb->args[0]) {
4920 err = ops->ndo_bridge_getlink(skb, portid,
4921 seq, dev,
4922 filter_mask,
4923 NLM_F_MULTI);
4924 if (err < 0 && err != -EOPNOTSUPP) {
4925 if (likely(skb->len))
4926 break;
4927
4928 goto out_err;
4929 }
4930 }
4931 idx++;
4932 }
4933 }
4934 err = skb->len;
4935 out_err:
4936 rcu_read_unlock();
4937 cb->args[0] = idx;
4938
4939 return err;
4940 }
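/* Illustrative usage: "bridge link show" issues an AF_BRIDGE RTM_GETLINK
 * dump served by the handler above; both the bridge master and the port
 * device itself may contribute one entry per port.
 */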
4941
4942 static inline size_t bridge_nlmsg_size(void)
4943 {
4944 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4945 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4946 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4947 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
4948 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
4949 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
4950 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
4951 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
4952 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
4953 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
4954 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
4955 }
4956
4957 static int rtnl_bridge_notify(struct net_device *dev)
4958 {
4959 struct net *net = dev_net(dev);
4960 struct sk_buff *skb;
4961 int err = -EOPNOTSUPP;
4962
4963 if (!dev->netdev_ops->ndo_bridge_getlink)
4964 return 0;
4965
4966 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
4967 if (!skb) {
4968 err = -ENOMEM;
4969 goto errout;
4970 }
4971
4972 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
4973 if (err < 0)
4974 goto errout;
4975
4976 /* Notification info is only filled for bridge ports, not the bridge
4977 * device itself. Therefore, a zero notification length is valid and
4978 * should not result in an error.
4979 */
4980 if (!skb->len)
4981 goto errout;
4982
4983 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4984 return 0;
4985 errout:
4986 WARN_ON(err == -EMSGSIZE);
4987 kfree_skb(skb);
4988 if (err)
4989 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
4990 return err;
4991 }
4992
4993 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
4994 struct netlink_ext_ack *extack)
4995 {
4996 struct net *net = sock_net(skb->sk);
4997 struct ifinfomsg *ifm;
4998 struct net_device *dev;
4999 struct nlattr *br_spec, *attr = NULL;
5000 int rem, err = -EOPNOTSUPP;
5001 u16 flags = 0;
5002 bool have_flags = false;
5003
5004 if (nlmsg_len(nlh) < sizeof(*ifm))
5005 return -EINVAL;
5006
5007 ifm = nlmsg_data(nlh);
5008 if (ifm->ifi_family != AF_BRIDGE)
5009 return -EPFNOSUPPORT;
5010
5011 dev = __dev_get_by_index(net, ifm->ifi_index);
5012 if (!dev) {
5013 NL_SET_ERR_MSG(extack, "unknown ifindex");
5014 return -ENODEV;
5015 }
5016
5017 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5018 if (br_spec) {
5019 nla_for_each_nested(attr, br_spec, rem) {
5020 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5021 if (nla_len(attr) < sizeof(flags))
5022 return -EINVAL;
5023
5024 have_flags = true;
5025 flags = nla_get_u16(attr);
5026 break;
5027 }
5028 }
5029 }
5030
5031 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5032 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5033
5034 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
5035 err = -EOPNOTSUPP;
5036 goto out;
5037 }
5038
5039 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
5040 extack);
5041 if (err)
5042 goto out;
5043
5044 flags &= ~BRIDGE_FLAGS_MASTER;
5045 }
5046
5047 if ((flags & BRIDGE_FLAGS_SELF)) {
5048 if (!dev->netdev_ops->ndo_bridge_setlink)
5049 err = -EOPNOTSUPP;
5050 else
5051 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
5052 flags,
5053 extack);
5054 if (!err) {
5055 flags &= ~BRIDGE_FLAGS_SELF;
5056
5057 /* Generate event to notify upper layer of bridge
5058 * change
5059 */
5060 err = rtnl_bridge_notify(dev);
5061 }
5062 }
5063
5064 if (have_flags)
5065 memcpy(nla_data(attr), &flags, sizeof(flags));
5066 out:
5067 return err;
5068 }
5069
5070 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
5071 struct netlink_ext_ack *extack)
5072 {
5073 struct net *net = sock_net(skb->sk);
5074 struct ifinfomsg *ifm;
5075 struct net_device *dev;
5076 struct nlattr *br_spec, *attr = NULL;
5077 int rem, err = -EOPNOTSUPP;
5078 u16 flags = 0;
5079 bool have_flags = false;
5080
5081 if (nlmsg_len(nlh) < sizeof(*ifm))
5082 return -EINVAL;
5083
5084 ifm = nlmsg_data(nlh);
5085 if (ifm->ifi_family != AF_BRIDGE)
5086 return -EPFNOSUPPORT;
5087
5088 dev = __dev_get_by_index(net, ifm->ifi_index);
5089 if (!dev) {
5090 NL_SET_ERR_MSG(extack, "unknown ifindex");
5091 return -ENODEV;
5092 }
5093
5094 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5095 if (br_spec) {
5096 nla_for_each_nested(attr, br_spec, rem) {
5097 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
5098 if (nla_len(attr) < sizeof(flags))
5099 return -EINVAL;
5100
5101 have_flags = true;
5102 flags = nla_get_u16(attr);
5103 break;
5104 }
5105 }
5106 }
5107
5108 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
5109 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
5110
5111 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
5112 err = -EOPNOTSUPP;
5113 goto out;
5114 }
5115
5116 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
5117 if (err)
5118 goto out;
5119
5120 flags &= ~BRIDGE_FLAGS_MASTER;
5121 }
5122
5123 if ((flags & BRIDGE_FLAGS_SELF)) {
5124 if (!dev->netdev_ops->ndo_bridge_dellink)
5125 err = -EOPNOTSUPP;
5126 else
5127 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
5128 flags);
5129
5130 if (!err) {
5131 flags &= ~BRIDGE_FLAGS_SELF;
5132
5133 /* Generate event to notify upper layer of bridge
5134 * change
5135 */
5136 err = rtnl_bridge_notify(dev);
5137 }
5138 }
5139
5140 if (have_flags)
5141 memcpy(nla_data(attr), &flags, sizeof(flags));
5142 out:
5143 return err;
5144 }
5145
5146 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
5147 {
5148 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
5149 (!idxattr || idxattr == attrid);
5150 }
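/* Informational: IFLA_STATS_FILTER_BIT(ATTR) is defined in the uapi as
 * (1 << (ATTR - 1)), so requesting e.g. IFLA_STATS_LINK_64 (attribute 1)
 * means setting bit 0 of ifsm->filter_mask.
 */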
5151
5152 static bool
5153 rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
5154 {
5155 return dev->netdev_ops &&
5156 dev->netdev_ops->ndo_has_offload_stats &&
5157 dev->netdev_ops->ndo_get_offload_stats &&
5158 dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
5159 }
5160
5161 static unsigned int
5162 rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
5163 {
5164 return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
5165 sizeof(struct rtnl_link_stats64) : 0;
5166 }
5167
5168 static int
5169 rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
5170 struct sk_buff *skb)
5171 {
5172 unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
5173 struct nlattr *attr = NULL;
5174 void *attr_data;
5175 int err;
5176
5177 if (!size)
5178 return -ENODATA;
5179
5180 attr = nla_reserve_64bit(skb, attr_id, size,
5181 IFLA_OFFLOAD_XSTATS_UNSPEC);
5182 if (!attr)
5183 return -EMSGSIZE;
5184
5185 attr_data = nla_data(attr);
5186 memset(attr_data, 0, size);
5187
5188 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
5189 if (err)
5190 return err;
5191
5192 return 0;
5193 }
5194
5195 static unsigned int
5196 rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
5197 enum netdev_offload_xstats_type type)
5198 {
5199 bool enabled = netdev_offload_xstats_enabled(dev, type);
5200
5201 return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
5202 }
5203
5204 struct rtnl_offload_xstats_request_used {
5205 bool request;
5206 bool used;
5207 };
5208
5209 static int
5210 rtnl_offload_xstats_get_stats(struct net_device *dev,
5211 enum netdev_offload_xstats_type type,
5212 struct rtnl_offload_xstats_request_used *ru,
5213 struct rtnl_hw_stats64 *stats,
5214 struct netlink_ext_ack *extack)
5215 {
5216 bool request;
5217 bool used;
5218 int err;
5219
5220 request = netdev_offload_xstats_enabled(dev, type);
5221 if (!request) {
5222 used = false;
5223 goto out;
5224 }
5225
5226 err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
5227 if (err)
5228 return err;
5229
5230 out:
5231 if (ru) {
5232 ru->request = request;
5233 ru->used = used;
5234 }
5235 return 0;
5236 }
5237
5238 static int
5239 rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
5240 struct rtnl_offload_xstats_request_used *ru)
5241 {
5242 struct nlattr *nest;
5243
5244 nest = nla_nest_start(skb, attr_id);
5245 if (!nest)
5246 return -EMSGSIZE;
5247
5248 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
5249 goto nla_put_failure;
5250
5251 if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
5252 goto nla_put_failure;
5253
5254 nla_nest_end(skb, nest);
5255 return 0;
5256
5257 nla_put_failure:
5258 nla_nest_cancel(skb, nest);
5259 return -EMSGSIZE;
5260 }
5261
5262 static int
5263 rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
5264 struct netlink_ext_ack *extack)
5265 {
5266 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5267 struct rtnl_offload_xstats_request_used ru_l3;
5268 struct nlattr *nest;
5269 int err;
5270
5271 err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
5272 if (err)
5273 return err;
5274
5275 nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5276 if (!nest)
5277 return -EMSGSIZE;
5278
5279 if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
5280 IFLA_OFFLOAD_XSTATS_L3_STATS,
5281 &ru_l3))
5282 goto nla_put_failure;
5283
5284 nla_nest_end(skb, nest);
5285 return 0;
5286
5287 nla_put_failure:
5288 nla_nest_cancel(skb, nest);
5289 return -EMSGSIZE;
5290 }
5291
5292 static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
5293 int *prividx, u32 off_filter_mask,
5294 struct netlink_ext_ack *extack)
5295 {
5296 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5297 int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
5298 int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
5299 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5300 bool have_data = false;
5301 int err;
5302
5303 if (*prividx <= attr_id_cpu_hit &&
5304 (off_filter_mask &
5305 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
5306 err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
5307 if (!err) {
5308 have_data = true;
5309 } else if (err != -ENODATA) {
5310 *prividx = attr_id_cpu_hit;
5311 return err;
5312 }
5313 }
5314
5315 if (*prividx <= attr_id_hw_s_info &&
5316 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
5317 *prividx = attr_id_hw_s_info;
5318
5319 err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
5320 if (err)
5321 return err;
5322
5323 have_data = true;
5324 *prividx = 0;
5325 }
5326
5327 if (*prividx <= attr_id_l3_stats &&
5328 (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
5329 unsigned int size_l3;
5330 struct nlattr *attr;
5331
5332 *prividx = attr_id_l3_stats;
5333
5334 size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5335 if (!size_l3)
5336 goto skip_l3_stats;
5337 attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
5338 IFLA_OFFLOAD_XSTATS_UNSPEC);
5339 if (!attr)
5340 return -EMSGSIZE;
5341
5342 err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
5343 nla_data(attr), extack);
5344 if (err)
5345 return err;
5346
5347 have_data = true;
5348 skip_l3_stats:
5349 *prividx = 0;
5350 }
5351
5352 if (!have_data)
5353 return -ENODATA;
5354
5355 *prividx = 0;
5356 return 0;
5357 }
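/* Informational: *prividx above records the nested attribute currently
 * being attempted, so a fill interrupted by -EMSGSIZE can resume at the
 * same attribute on the next dump pass; it is reset to 0 once all
 * requested parts fit.
 */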
5358
5359 static unsigned int
5360 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
5361 enum netdev_offload_xstats_type type)
5362 {
5363 bool enabled = netdev_offload_xstats_enabled(dev, type);
5364
5365 return nla_total_size(0) +
5366 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
5367 nla_total_size(sizeof(u8)) +
5368 /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
5369 (enabled ? nla_total_size(sizeof(u8)) : 0) +
5370 0;
5371 }
5372
5373 static unsigned int
5374 rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
5375 {
5376 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5377
5378 return nla_total_size(0) +
5379 /* IFLA_OFFLOAD_XSTATS_L3_STATS */
5380 rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
5381 0;
5382 }
5383
5384 static int rtnl_offload_xstats_get_size(const struct net_device *dev,
5385 u32 off_filter_mask)
5386 {
5387 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5388 int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
5389 int nla_size = 0;
5390 int size;
5391
5392 if (off_filter_mask &
5393 IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
5394 size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
5395 nla_size += nla_total_size_64bit(size);
5396 }
5397
5398 if (off_filter_mask &
5399 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
5400 nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
5401
5402 if (off_filter_mask &
5403 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
5404 size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
5405 nla_size += nla_total_size_64bit(size);
5406 }
5407
5408 if (nla_size != 0)
5409 nla_size += nla_total_size(0);
5410
5411 return nla_size;
5412 }
5413
5414 struct rtnl_stats_dump_filters {
5415 /* mask[0] filters outer attributes. Then individual nests have their
5416 * filtering mask at the index of the nested attribute.
5417 */
5418 u32 mask[IFLA_STATS_MAX + 1];
5419 };
5420
5421 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
5422 int type, u32 pid, u32 seq, u32 change,
5423 unsigned int flags,
5424 const struct rtnl_stats_dump_filters *filters,
5425 int *idxattr, int *prividx,
5426 struct netlink_ext_ack *extack)
5427 {
5428 unsigned int filter_mask = filters->mask[0];
5429 struct if_stats_msg *ifsm;
5430 struct nlmsghdr *nlh;
5431 struct nlattr *attr;
5432 int s_prividx = *prividx;
5433 int err;
5434
5435 ASSERT_RTNL();
5436
5437 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
5438 if (!nlh)
5439 return -EMSGSIZE;
5440
5441 ifsm = nlmsg_data(nlh);
5442 ifsm->family = PF_UNSPEC;
5443 ifsm->pad1 = 0;
5444 ifsm->pad2 = 0;
5445 ifsm->ifindex = dev->ifindex;
5446 ifsm->filter_mask = filter_mask;
5447
5448 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
5449 struct rtnl_link_stats64 *sp;
5450
5451 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
5452 sizeof(struct rtnl_link_stats64),
5453 IFLA_STATS_UNSPEC);
5454 if (!attr) {
5455 err = -EMSGSIZE;
5456 goto nla_put_failure;
5457 }
5458
5459 sp = nla_data(attr);
5460 dev_get_stats(dev, sp);
5461 }
5462
5463 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
5464 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5465
5466 if (ops && ops->fill_linkxstats) {
5467 *idxattr = IFLA_STATS_LINK_XSTATS;
5468 attr = nla_nest_start_noflag(skb,
5469 IFLA_STATS_LINK_XSTATS);
5470 if (!attr) {
5471 err = -EMSGSIZE;
5472 goto nla_put_failure;
5473 }
5474
5475 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5476 nla_nest_end(skb, attr);
5477 if (err)
5478 goto nla_put_failure;
5479 *idxattr = 0;
5480 }
5481 }
5482
5483 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
5484 *idxattr)) {
5485 const struct rtnl_link_ops *ops = NULL;
5486 const struct net_device *master;
5487
5488 master = netdev_master_upper_dev_get(dev);
5489 if (master)
5490 ops = master->rtnl_link_ops;
5491 if (ops && ops->fill_linkxstats) {
5492 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
5493 attr = nla_nest_start_noflag(skb,
5494 IFLA_STATS_LINK_XSTATS_SLAVE);
5495 if (!attr) {
5496 err = -EMSGSIZE;
5497 goto nla_put_failure;
5498 }
5499
5500 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
5501 nla_nest_end(skb, attr);
5502 if (err)
5503 goto nla_put_failure;
5504 *idxattr = 0;
5505 }
5506 }
5507
5508 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
5509 *idxattr)) {
5510 u32 off_filter_mask;
5511
5512 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5513 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
5514 attr = nla_nest_start_noflag(skb,
5515 IFLA_STATS_LINK_OFFLOAD_XSTATS);
5516 if (!attr) {
5517 err = -EMSGSIZE;
5518 goto nla_put_failure;
5519 }
5520
5521 err = rtnl_offload_xstats_fill(skb, dev, prividx,
5522 off_filter_mask, extack);
5523 if (err == -ENODATA)
5524 nla_nest_cancel(skb, attr);
5525 else
5526 nla_nest_end(skb, attr);
5527
5528 if (err && err != -ENODATA)
5529 goto nla_put_failure;
5530 *idxattr = 0;
5531 }
5532
5533 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
5534 struct rtnl_af_ops *af_ops;
5535
5536 *idxattr = IFLA_STATS_AF_SPEC;
5537 attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
5538 if (!attr) {
5539 err = -EMSGSIZE;
5540 goto nla_put_failure;
5541 }
5542
5543 rcu_read_lock();
5544 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5545 if (af_ops->fill_stats_af) {
5546 struct nlattr *af;
5547
5548 af = nla_nest_start_noflag(skb,
5549 af_ops->family);
5550 if (!af) {
5551 rcu_read_unlock();
5552 err = -EMSGSIZE;
5553 goto nla_put_failure;
5554 }
5555 err = af_ops->fill_stats_af(skb, dev);
5556
5557 if (err == -ENODATA) {
5558 nla_nest_cancel(skb, af);
5559 } else if (err < 0) {
5560 rcu_read_unlock();
5561 goto nla_put_failure;
5562 }
5563
5564 nla_nest_end(skb, af);
5565 }
5566 }
5567 rcu_read_unlock();
5568
5569 nla_nest_end(skb, attr);
5570
5571 *idxattr = 0;
5572 }
5573
5574 nlmsg_end(skb, nlh);
5575
5576 return 0;
5577
5578 nla_put_failure:
5579 /* not a multi message or no progress mean a real error */
5580 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
5581 nlmsg_cancel(skb, nlh);
5582 else
5583 nlmsg_end(skb, nlh);
5584
5585 return err;
5586 }
5587
5588 static size_t if_nlmsg_stats_size(const struct net_device *dev,
5589 const struct rtnl_stats_dump_filters *filters)
5590 {
5591 size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
5592 unsigned int filter_mask = filters->mask[0];
5593
5594 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
5595 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
5596
5597 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
5598 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
5599 int attr = IFLA_STATS_LINK_XSTATS;
5600
5601 if (ops && ops->get_linkxstats_size) {
5602 size += nla_total_size(ops->get_linkxstats_size(dev,
5603 attr));
5604 /* for IFLA_STATS_LINK_XSTATS */
5605 size += nla_total_size(0);
5606 }
5607 }
5608
5609 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
5610 struct net_device *_dev = (struct net_device *)dev;
5611 const struct rtnl_link_ops *ops = NULL;
5612 const struct net_device *master;
5613
5614 /* netdev_master_upper_dev_get can't take const */
5615 master = netdev_master_upper_dev_get(_dev);
5616 if (master)
5617 ops = master->rtnl_link_ops;
5618 if (ops && ops->get_linkxstats_size) {
5619 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
5620
5621 size += nla_total_size(ops->get_linkxstats_size(dev,
5622 attr));
5623 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
5624 size += nla_total_size(0);
5625 }
5626 }
5627
5628 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
5629 u32 off_filter_mask;
5630
5631 off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
5632 size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
5633 }
5634
5635 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
5636 struct rtnl_af_ops *af_ops;
5637
5638 /* for IFLA_STATS_AF_SPEC */
5639 size += nla_total_size(0);
5640
5641 rcu_read_lock();
5642 list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
5643 if (af_ops->get_stats_af_size) {
5644 size += nla_total_size(
5645 af_ops->get_stats_af_size(dev));
5646
5647 /* for AF_* */
5648 size += nla_total_size(0);
5649 }
5650 }
5651 rcu_read_unlock();
5652 }
5653
5654 return size;
5655 }
5656
5657 #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
5658
5659 static const struct nla_policy
5660 rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
5661 [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
5662 NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
5663 };
5664
5665 static const struct nla_policy
5666 rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
5667 [IFLA_STATS_GET_FILTERS] =
5668 NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
5669 };
5670
5671 static const struct nla_policy
5672 ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
5673 [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
5674 };
5675
5676 static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
5677 struct rtnl_stats_dump_filters *filters,
5678 struct netlink_ext_ack *extack)
5679 {
5680 struct nlattr *tb[IFLA_STATS_MAX + 1];
5681 int err;
5682 int at;
5683
5684 err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
5685 rtnl_stats_get_policy_filters, extack);
5686 if (err < 0)
5687 return err;
5688
5689 for (at = 1; at <= IFLA_STATS_MAX; at++) {
5690 if (tb[at]) {
5691 if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
5692 NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
5693 return -EINVAL;
5694 }
5695 filters->mask[at] = nla_get_u32(tb[at]);
5696 }
5697 }
5698
5699 return 0;
5700 }
5701
5702 static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
5703 u32 filter_mask,
5704 struct rtnl_stats_dump_filters *filters,
5705 struct netlink_ext_ack *extack)
5706 {
5707 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5708 int err;
5709 int i;
5710
5711 filters->mask[0] = filter_mask;
5712 for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
5713 filters->mask[i] = -1U;
5714
5715 err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
5716 IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
5717 if (err < 0)
5718 return err;
5719
5720 if (tb[IFLA_STATS_GET_FILTERS]) {
5721 err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
5722 filters, extack);
5723 if (err)
5724 return err;
5725 }
5726
5727 return 0;
5728 }
5729
5730 static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
5731 bool is_dump, struct netlink_ext_ack *extack)
5732 {
5733 struct if_stats_msg *ifsm;
5734
5735 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
5736 NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
5737 return -EINVAL;
5738 }
5739
5740 if (!strict_check)
5741 return 0;
5742
5743 ifsm = nlmsg_data(nlh);
5744
5745 /* only requests using strict checks can pass data to influence
5746 * the dump. The legacy exception is filter_mask.
5747 */
5748 if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
5749 NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
5750 return -EINVAL;
5751 }
5752 if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
5753 NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
5754 return -EINVAL;
5755 }
5756
5757 return 0;
5758 }
5759
5760 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
5761 struct netlink_ext_ack *extack)
5762 {
5763 struct rtnl_stats_dump_filters filters;
5764 struct net *net = sock_net(skb->sk);
5765 struct net_device *dev = NULL;
5766 int idxattr = 0, prividx = 0;
5767 struct if_stats_msg *ifsm;
5768 struct sk_buff *nskb;
5769 int err;
5770
5771 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5772 false, extack);
5773 if (err)
5774 return err;
5775
5776 ifsm = nlmsg_data(nlh);
5777 if (ifsm->ifindex > 0)
5778 dev = __dev_get_by_index(net, ifsm->ifindex);
5779 else
5780 return -EINVAL;
5781
5782 if (!dev)
5783 return -ENODEV;
5784
5785 if (!ifsm->filter_mask) {
5786 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
5787 return -EINVAL;
5788 }
5789
5790 err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
5791 if (err)
5792 return err;
5793
5794 nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
5795 if (!nskb)
5796 return -ENOBUFS;
5797
5798 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
5799 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
5800 0, &filters, &idxattr, &prividx, extack);
5801 if (err < 0) {
5802 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
5803 WARN_ON(err == -EMSGSIZE);
5804 kfree_skb(nskb);
5805 } else {
5806 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
5807 }
5808
5809 return err;
5810 }
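/* Illustrative usage (recent iproute2; exact syntax may vary by version):
 *
 *   ip stats show dev eth0 group link
 *
 * sends RTM_GETSTATS with IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64) set
 * in filter_mask and is answered by the doit handler above.
 */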
5811
5812 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
5813 {
5814 struct netlink_ext_ack *extack = cb->extack;
5815 int h, s_h, err, s_idx, s_idxattr, s_prividx;
5816 struct rtnl_stats_dump_filters filters;
5817 struct net *net = sock_net(skb->sk);
5818 unsigned int flags = NLM_F_MULTI;
5819 struct if_stats_msg *ifsm;
5820 struct hlist_head *head;
5821 struct net_device *dev;
5822 int idx = 0;
5823
5824 s_h = cb->args[0];
5825 s_idx = cb->args[1];
5826 s_idxattr = cb->args[2];
5827 s_prividx = cb->args[3];
5828
5829 cb->seq = net->dev_base_seq;
5830
5831 err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
5832 if (err)
5833 return err;
5834
5835 ifsm = nlmsg_data(cb->nlh);
5836 if (!ifsm->filter_mask) {
5837 NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
5838 return -EINVAL;
5839 }
5840
5841 err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
5842 extack);
5843 if (err)
5844 return err;
5845
5846 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5847 idx = 0;
5848 head = &net->dev_index_head[h];
5849 hlist_for_each_entry(dev, head, index_hlist) {
5850 if (idx < s_idx)
5851 goto cont;
5852 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
5853 NETLINK_CB(cb->skb).portid,
5854 cb->nlh->nlmsg_seq, 0,
5855 flags, &filters,
5856 &s_idxattr, &s_prividx,
5857 extack);
5858 /* If we ran out of room on the first message,
5859 * we're in trouble
5860 */
5861 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
5862
5863 if (err < 0)
5864 goto out;
5865 s_prividx = 0;
5866 s_idxattr = 0;
5867 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5868 cont:
5869 idx++;
5870 }
5871 }
5872 out:
5873 cb->args[3] = s_prividx;
5874 cb->args[2] = s_idxattr;
5875 cb->args[1] = idx;
5876 cb->args[0] = h;
5877
5878 return skb->len;
5879 }
5880
5881 void rtnl_offload_xstats_notify(struct net_device *dev)
5882 {
5883 struct rtnl_stats_dump_filters response_filters = {};
5884 struct net *net = dev_net(dev);
5885 int idxattr = 0, prividx = 0;
5886 struct sk_buff *skb;
5887 int err = -ENOBUFS;
5888
5889 ASSERT_RTNL();
5890
5891 response_filters.mask[0] |=
5892 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5893 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5894 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5895
5896 skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
5897 GFP_KERNEL);
5898 if (!skb)
5899 goto errout;
5900
5901 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
5902 &response_filters, &idxattr, &prividx, NULL);
5903 if (err < 0) {
5904 kfree_skb(skb);
5905 goto errout;
5906 }
5907
5908 rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
5909 return;
5910
5911 errout:
5912 rtnl_set_sk_err(net, RTNLGRP_STATS, err);
5913 }
5914 EXPORT_SYMBOL(rtnl_offload_xstats_notify);
5915
5916 static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
5917 struct netlink_ext_ack *extack)
5918 {
5919 enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
5920 struct rtnl_stats_dump_filters response_filters = {};
5921 struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
5922 struct net *net = sock_net(skb->sk);
5923 struct net_device *dev = NULL;
5924 struct if_stats_msg *ifsm;
5925 bool notify = false;
5926 int err;
5927
5928 err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
5929 false, extack);
5930 if (err)
5931 return err;
5932
5933 ifsm = nlmsg_data(nlh);
5934 if (ifsm->family != AF_UNSPEC) {
5935 NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
5936 return -EINVAL;
5937 }
5938
5939 if (ifsm->ifindex > 0)
5940 dev = __dev_get_by_index(net, ifsm->ifindex);
5941 else
5942 return -EINVAL;
5943
5944 if (!dev)
5945 return -ENODEV;
5946
5947 if (ifsm->filter_mask) {
5948 NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
5949 return -EINVAL;
5950 }
5951
5952 err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
5953 ifla_stats_set_policy, extack);
5954 if (err < 0)
5955 return err;
5956
5957 if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
5958 u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
5959
5960 if (req)
5961 err = netdev_offload_xstats_enable(dev, t_l3, extack);
5962 else
5963 err = netdev_offload_xstats_disable(dev, t_l3);
5964
5965 if (!err)
5966 notify = true;
5967 else if (err != -EALREADY)
5968 return err;
5969
5970 response_filters.mask[0] |=
5971 IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
5972 response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
5973 IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
5974 }
5975
5976 if (notify)
5977 rtnl_offload_xstats_notify(dev);
5978
5979 return 0;
5980 }
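/* Illustrative usage (recent iproute2):
 *
 *   ip stats set dev eth0 l3_stats on
 *
 * carries IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 1 and lands in the
 * handler above, which toggles HW-offloaded L3 stats collection and then
 * notifies RTNLGRP_STATS listeners.
 */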
5981
5982 /* Process one rtnetlink message. */
5983
5984 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
5985 struct netlink_ext_ack *extack)
5986 {
5987 struct net *net = sock_net(skb->sk);
5988 struct rtnl_link *link;
5989 enum rtnl_kinds kind;
5990 struct module *owner;
5991 int err = -EOPNOTSUPP;
5992 rtnl_doit_func doit;
5993 unsigned int flags;
5994 int family;
5995 int type;
5996
5997 type = nlh->nlmsg_type;
5998 if (type > RTM_MAX)
5999 return -EOPNOTSUPP;
6000
6001 type -= RTM_BASE;
6002
6003 /* All the messages must have at least 1 byte length */
6004 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
6005 return 0;
6006
6007 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
6008 kind = rtnl_msgtype_kind(type);
6009
6010 if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
6011 return -EPERM;
6012
6013 rcu_read_lock();
6014 if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
6015 struct sock *rtnl;
6016 rtnl_dumpit_func dumpit;
6017 u32 min_dump_alloc = 0;
6018
6019 link = rtnl_get_link(family, type);
6020 if (!link || !link->dumpit) {
6021 family = PF_UNSPEC;
6022 link = rtnl_get_link(family, type);
6023 if (!link || !link->dumpit)
6024 goto err_unlock;
6025 }
6026 owner = link->owner;
6027 dumpit = link->dumpit;
6028
6029 if (type == RTM_GETLINK - RTM_BASE)
6030 min_dump_alloc = rtnl_calcit(skb, nlh);
6031
6032 err = 0;
6033 /* need to do this before rcu_read_unlock() */
6034 if (!try_module_get(owner))
6035 err = -EPROTONOSUPPORT;
6036
6037 rcu_read_unlock();
6038
6039 rtnl = net->rtnl;
6040 if (err == 0) {
6041 struct netlink_dump_control c = {
6042 .dump = dumpit,
6043 .min_dump_alloc = min_dump_alloc,
6044 .module = owner,
6045 };
6046 err = netlink_dump_start(rtnl, skb, nlh, &c);
6047 /* netlink_dump_start() will keep a reference on
6048 * module if dump is still in progress.
6049 */
6050 module_put(owner);
6051 }
6052 return err;
6053 }
6054
6055 link = rtnl_get_link(family, type);
6056 if (!link || !link->doit) {
6057 family = PF_UNSPEC;
6058 link = rtnl_get_link(PF_UNSPEC, type);
6059 if (!link || !link->doit)
6060 goto out_unlock;
6061 }
6062
6063 owner = link->owner;
6064 if (!try_module_get(owner)) {
6065 err = -EPROTONOSUPPORT;
6066 goto out_unlock;
6067 }
6068
6069 flags = link->flags;
6070 if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
6071 !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
6072 NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
6073 module_put(owner);
6074 goto err_unlock;
6075 }
6076
6077 if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
6078 doit = link->doit;
6079 rcu_read_unlock();
6080 if (doit)
6081 err = doit(skb, nlh, extack);
6082 module_put(owner);
6083 return err;
6084 }
6085 rcu_read_unlock();
6086
6087 rtnl_lock();
6088 link = rtnl_get_link(family, type);
6089 if (link && link->doit)
6090 err = link->doit(skb, nlh, extack);
6091 rtnl_unlock();
6092
6093 module_put(owner);
6094
6095 return err;
6096
6097 out_unlock:
6098 rcu_read_unlock();
6099 return err;
6100
6101 err_unlock:
6102 rcu_read_unlock();
6103 return -EOPNOTSUPP;
6104 }
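/* Informational: rtnl_msgtype_kind() derives the kind from the low two
 * bits of the message type (RTM_NEW* = 0, RTM_DEL* = 1, RTM_GET* = 2,
 * RTM_SET* = 3), which is why only GET requests bypass the CAP_NET_ADMIN
 * check at the top of rtnetlink_rcv_msg().
 */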
6105
6106 static void rtnetlink_rcv(struct sk_buff *skb)
6107 {
6108 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
6109 }
6110
6111 static int rtnetlink_bind(struct net *net, int group)
6112 {
6113 switch (group) {
6114 case RTNLGRP_IPV4_MROUTE_R:
6115 case RTNLGRP_IPV6_MROUTE_R:
6116 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
6117 return -EPERM;
6118 break;
6119 }
6120 return 0;
6121 }
6122
6123 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
6124 {
6125 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6126
6127 switch (event) {
6128 case NETDEV_REBOOT:
6129 case NETDEV_CHANGEMTU:
6130 case NETDEV_CHANGEADDR:
6131 case NETDEV_CHANGENAME:
6132 case NETDEV_FEAT_CHANGE:
6133 case NETDEV_BONDING_FAILOVER:
6134 case NETDEV_POST_TYPE_CHANGE:
6135 case NETDEV_NOTIFY_PEERS:
6136 case NETDEV_CHANGEUPPER:
6137 case NETDEV_RESEND_IGMP:
6138 case NETDEV_CHANGEINFODATA:
6139 case NETDEV_CHANGELOWERSTATE:
6140 case NETDEV_CHANGE_TX_QUEUE_LEN:
6141 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
6142 GFP_KERNEL, NULL, 0);
6143 break;
6144 default:
6145 break;
6146 }
6147 return NOTIFY_DONE;
6148 }
6149
6150 static struct notifier_block rtnetlink_dev_notifier = {
6151 .notifier_call = rtnetlink_event,
6152 };
6153
6154
6155 static int __net_init rtnetlink_net_init(struct net *net)
6156 {
6157 struct sock *sk;
6158 struct netlink_kernel_cfg cfg = {
6159 .groups = RTNLGRP_MAX,
6160 .input = rtnetlink_rcv,
6161 .cb_mutex = &rtnl_mutex,
6162 .flags = NL_CFG_F_NONROOT_RECV,
6163 .bind = rtnetlink_bind,
6164 };
6165
6166 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
6167 if (!sk)
6168 return -ENOMEM;
6169 net->rtnl = sk;
6170 return 0;
6171 }
6172
6173 static void __net_exit rtnetlink_net_exit(struct net *net)
6174 {
6175 netlink_kernel_release(net->rtnl);
6176 net->rtnl = NULL;
6177 }
6178
6179 static struct pernet_operations rtnetlink_net_ops = {
6180 .init = rtnetlink_net_init,
6181 .exit = rtnetlink_net_exit,
6182 };
6183
6184 void __init rtnetlink_init(void)
6185 {
6186 if (register_pernet_subsys(&rtnetlink_net_ops))
6187 panic("rtnetlink_init: cannot initialize rtnetlink\n");
6188
6189 register_netdevice_notifier(&rtnetlink_dev_notifier);
6190
6191 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
6192 rtnl_dump_ifinfo, 0);
6193 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
6194 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
6195 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
6196
6197 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
6198 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
6199 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
6200
6201 rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
6202 rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
6203
6204 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
6205 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
6206 RTNL_FLAG_BULK_DEL_SUPPORTED);
6207 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
6208
6209 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
6210 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
6211 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
6212
6213 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
6214 0);
6215 rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
6216 }