// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

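/* The list of all installed classifier types */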
static LIST_HEAD(tcf_proto_base);

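/* Protects list of registered TC modules. It is pure SMP lock. */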
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

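/* Find classifier type by string name */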
static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
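	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */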
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

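/* Register(unregister) new classifier type */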
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

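	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */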
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

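/* Select new prio value from the range, managed by kernel. */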
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
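	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and provide meaningful error.
	 */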
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block) \
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

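/* Returns true if block can be safely freed. */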
static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

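	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */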
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

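	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user should not
	 * see them.
	 */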
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

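	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */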
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

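	/* The last dropped non-action reference will trigger notification. */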
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
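		/* Last reference to chain, no need to lock. */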
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

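	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */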
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock;
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

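	/* Don't store q pointer for blocks which are shared */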
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

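	/* skip all action-only chains */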
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

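/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning. All chains with refcount zero are skipped and reference is taken.
 * Such references are kept until chain destruction or this function is called
 * again.
 */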
struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
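		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */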
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

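/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */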
struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

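	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */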
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

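/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */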
static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

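	/* Find link */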
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

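	/* Find qdisc */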
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

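	/* Is it classful? */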
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
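	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */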
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

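	/* Do we search for filter, attached to class? */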
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

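		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */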
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
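		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */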
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

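/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */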
static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
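		/* block_index not 0 means the shared block is requested */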
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

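/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */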
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
	     chain = __tcf_get_next_chain(block, chain),
	     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

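/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */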
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC;
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

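			/* Consume, so cloned/redirect skbs won't inherit ext */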
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
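		/* If we missed on some chain */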
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

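/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */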
static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

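	/* Atomically find and remove tp from chain. */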
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}

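	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */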
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

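	/* Check the chain for existence of proto-tcf with this priority */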
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, true);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
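		/* If no priority is provided by the user,
		 * we allocate one.
		 */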
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

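	/* Find head of filter chain. */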
	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

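	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */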
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

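		/* Proto-tcf does not exist, create new one */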
		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);

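		/* q pointer is NULL for shared blocks */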
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
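		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */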
		rtnl_held = true;

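		/* Replay the request. */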
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

2232 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2233 if (err)
2234 return err;
2235
2236 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2237 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2238 err = -EINVAL;
2239 goto errout;
2240 }
2241
2242
2243
2244
2245 if (!prio ||
2246 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2247 !tcf_proto_is_unlocked(name)) {
2248 rtnl_held = true;
2249 rtnl_lock();
2250 }
2251
2252 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2253 if (err)
2254 goto errout;
2255
2256 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2257 extack);
2258 if (IS_ERR(block)) {
2259 err = PTR_ERR(block);
2260 goto errout;
2261 }
2262
2263 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2264 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2265 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2266 err = -EINVAL;
2267 goto errout;
2268 }
2269 chain = tcf_chain_get(block, chain_index, false);
2270 if (!chain) {
2271
2272
2273
2274 if (prio == 0) {
2275 err = 0;
2276 goto errout;
2277 }
2278 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2279 err = -ENOENT;
2280 goto errout;
2281 }
2282
2283 if (prio == 0) {
2284 tfilter_notify_chain(net, skb, block, q, parent, n,
2285 chain, RTM_DELTFILTER);
2286 tcf_chain_flush(chain, rtnl_held);
2287 err = 0;
2288 goto errout;
2289 }
2290
2291 mutex_lock(&chain->filter_chain_lock);
2292 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2293 prio, false);
2294 if (!tp || IS_ERR(tp)) {
2295 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2296 err = tp ? PTR_ERR(tp) : -ENOENT;
2297 goto errout_locked;
2298 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2299 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2300 err = -EINVAL;
2301 goto errout_locked;
2302 } else if (t->tcm_handle == 0) {
2303 tcf_proto_signal_destroying(chain, tp);
2304 tcf_chain_tp_remove(chain, &chain_info, tp);
2305 mutex_unlock(&chain->filter_chain_lock);
2306
2307 tcf_proto_put(tp, rtnl_held, NULL);
2308 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2309 RTM_DELTFILTER, false, rtnl_held);
2310 err = 0;
2311 goto errout;
2312 }
2313 mutex_unlock(&chain->filter_chain_lock);
2314
2315 fh = tp->ops->get(tp, t->tcm_handle);
2316
2317 if (!fh) {
2318 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2319 err = -ENOENT;
2320 } else {
2321 bool last;
2322
2323 err = tfilter_del_notify(net, skb, n, tp, block,
2324 q, parent, fh, false, &last,
2325 rtnl_held, extack);
2326
2327 if (err)
2328 goto errout;
2329 if (last)
2330 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2331 }
2332
2333 errout:
2334 if (chain) {
2335 if (tp && !IS_ERR(tp))
2336 tcf_proto_put(tp, rtnl_held, NULL);
2337 tcf_chain_put(chain);
2338 }
2339 tcf_block_release(q, block, rtnl_held);
2340
2341 if (rtnl_held)
2342 rtnl_unlock();
2343
2344 return err;
2345
2346 errout_locked:
2347 mutex_unlock(&chain->filter_chain_lock);
2348 goto errout;
2349 }
2350
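/* Get a single filter (RTM_GETTFILTER) and send it back to the requesting
 * socket.
 */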
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, or classifier is not unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}

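/* State passed through the classifier walker while dumping all filters of a
 * block to userspace.
 */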
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
	bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, a->terse_dump, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	/* Iterate all protos on the chain; each step takes a reference to the
	 * next proto and drops the reference to the previous one.
	 */
	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	bool terse_dump = false;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     tcf_tfilter_dump_policy, cb->extack);
	if (err)
		return err;

	if (tca[TCA_DUMP_FLAGS]) {
		struct nla_bitfield32 flags =
			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index, terse_dump)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

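/* Fill a netlink message describing one chain and its template, if any. */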
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

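/* Send a chain add/get notification, either unicast to the requester or
 * multicast to the RTNLGRP_TC group.
 */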
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	return err;
}

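/* Like tc_chain_notify(), but usable once the chain object is already being
 * destroyed; the template ops and private data are passed in explicitly.
 */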
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return rtnl_unicast(skb, net, portid);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

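/* Instantiate a chain template from TCA_KIND plus classifier-specific
 * attributes.
 */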
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		/* Drop the module reference taken by tcf_proto_lookup_ops(). */
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops were never set, there is no template to destroy. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain *chain;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	q = NULL;
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		if (!tcm->tcm_parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));

		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

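/* Release all actions attached to an extension set. */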
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

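/* Parse and bind the actions of a filter change request, handling both the
 * legacy single police action and the extensible action list attribute.
 */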
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		int init_res[TCA_ACT_MAX_PRIO] = {};
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			struct tc_action_ops *a_o;

			/* Legacy single police action. */
			a_o = tc_action_load_ops(tb[exts->police], true,
						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
						 extack);
			if (IS_ERR(a_o))
				return PTR_ERR(a_o);
			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, a_o, init_res, flags,
						extack);
			module_put(a_o->owner);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
			tcf_idr_insert_many(exts->actions);
		} else if (exts->action && tb[exts->action]) {
			int err;

			/* New-style list of actions. */
			flags |= TCA_ACT_FLAGS_BIND;
			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, exts->actions, init_res,
					      &attr_size, flags, fl_flags,
					      extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate_ex);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts,
		      u32 flags, struct netlink_ext_ack *extack)
{
	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
				    flags, 0, extack);
}
EXPORT_SYMBOL(tcf_exts_validate);

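/* Swap in a new extension set and destroy whatever the destination held. */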
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

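/* Dump actions in a format readable by both old and new iproute2 binaries. */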
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/* Dump either the new nested action list or, for
		 * TCA_OLD_COMPAT entries, the legacy single police action,
		 * so that both old and new modes of entering tc data keep
		 * working.
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
			    < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

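/* Bookkeeping for hardware offload: block->offloadcnt counts the filters that
 * currently have TCA_CLS_FLAGS_IN_HW set.
 */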
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

/* Call all registered block callbacks. Returns the number of callbacks that
 * succeeded, or the first error when err_stop is set.
 */
static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

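/* Run an offload command through all callbacks of a block, taking the rtnl
 * lock on demand when the block is bound to a locked device.
 */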
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_add);


/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload
 * counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

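/* Replay a filter to a single callback, e.g. when a driver binds to an
 * existing block; keeps the in_hw counters consistent.
 */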
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;

	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}

void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);

static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}

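/* Translate an array of tc actions into flow_action entries for offload. */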
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		/* One tc action may expand into several flow_action entries
		 * (e.g. pedit); index returns how many were filled in.
		 */
		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (!exts)
		return 0;

	return tc_setup_action(flow_action, exts->actions, extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

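/* Qevents let a qdisc bind filter blocks to events it generates (e.g. early
 * drop or ECN mark), so that packets hitting the event can be classified and
 * acted upon.
 */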
#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	if (!block_index)
		return 0;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Bounce newly-configured block or change in block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

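/* Classify a packet against the qevent's block. Returns the skb when
 * processing should continue, or NULL when the filters consumed it.
 */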
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

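/* Register the filter and chain netlink handlers at boot. */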
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);