// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002)
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

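/* Global list of all mirred actions, protected by mirred_list_lock; the
 * netdevice notifier walks it to invalidate stale target device pointers.
 */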
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

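/* Guard against excessive stack growth: a redirect can re-enter the
 * datapath and run mirred again, so track per-CPU nesting depth and drop
 * packets once it exceeds MIRRED_RECURSION_LIMIT.
 */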
#define MIRRED_RECURSION_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

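/* These return codes tell the caller it no longer owns the original skb,
 * so a redirect may hand the skb off directly instead of cloning it.
 */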
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *odev, *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					   lockdep_is_held(&m->tcf_lock));
		netdev_put(odev, &m->tcfm_dev_tracker);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else
		err = netif_receive_skb(skb);

	return err;
}

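/* Main datapath handler: clone (or, for reinsertable redirects, steal)
 * the skb and forward it to the target device on the requested side,
 * ingress or egress, adjusting skb->data to what the target expects.
 */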
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

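	/* Account one level of nesting for this invocation and drop the
	 * packet if the per-CPU recursion limit has been exceeded.
	 */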
	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			err = tcf_mirred_forward(res->ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index = m->tcf_index,
		.refcnt = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

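/* On NETDEV_UNREGISTER, drop our reference and clear the cached device
 * pointer in every mirred action that targets the outgoing device.
 */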
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* No RCU grace period is needed here, since
				 * net_device is already RCU-protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

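/* Destructor handed back by tcf_mirred_get_dev(); releases the reference
 * taken while the RCU read lock was held.
 */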
static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

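/* Resolve the target device for a hardware-offloaded entry; the stored
 * destructor releases the device reference when the entry is disposed of.
 */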
static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind			= "mirred",
	.id			= TCA_ID_MIRRED,
	.owner			= THIS_MODULE,
	.act			= tcf_mirred_act,
	.stats_update		= tcf_stats_update,
	.dump			= tcf_mirred_dump,
	.cleanup		= tcf_mirred_release,
	.init			= tcf_mirred_init,
	.walk			= tcf_mirred_walker,
	.lookup			= tcf_mirred_search,
	.get_fill_size		= tcf_mirred_get_fill_size,
	.offload_act_setup	= tcf_mirred_offload_act_setup,
	.size			= sizeof(struct tcf_mirred),
	.get_dev		= tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

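/* Register the netdevice notifier first; if action registration fails,
 * it is unregistered again before the error is returned.
 */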
static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);