// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 */

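/* matchall matches every packet on the qdisc it is attached to and runs
 * the configured actions, optionally offloaded to hardware. A typical
 * userspace setup (iproute2) looks like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall action drop
 */
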
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

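/* A matchall classifier holds at most one filter; this head structure is
 * that filter, reached from tp->root under RCU. @pf counts software hits
 * per CPU; @in_hw_count tracks how many devices carry the offloaded rule.
 */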
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

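/* Fast path: every packet matches. Bump the per-CPU software hit counter
 * and run the attached actions, unless the filter is hardware-only
 * (skip_sw), in which case software classification declines the packet.
 */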
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

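/* Free the filter: release the actions, drop the netns reference taken by
 * tcf_exts_get_net(), and free the per-CPU counters and the head itself.
 */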
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

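/* Deferred destruction: queued via tcf_queue_work() so it runs after an
 * RCU grace period, taking RTNL for the actual teardown.
 */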
static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

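/* Ask all offload-capable drivers bound to this block to remove the rule
 * identified by @cookie.
 */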
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

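/* Offload the rule to the drivers bound to this block. Translating the
 * actions is best-effort unless the filter is skip_sw; a skip_sw rule
 * that ends up in no hardware at all fails with -EINVAL.
 */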
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

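/* Tear down the single filter: unbind its class, remove any hardware
 * offload, then free the head. The free is deferred past an RCU grace
 * period while the network namespace is still alive, or done
 * synchronously during netns teardown.
 */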
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

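/* There is at most one filter, so lookup is a simple handle comparison. */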
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

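/* Netlink attribute policy for the TCA_OPTIONS of a matchall filter. */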
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

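/* Validate and apply the netlink parameters: actions and rate estimator
 * first, then the optional class binding.
 */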
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, u32 flags, u32 fl_flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

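/* Create the filter. matchall allows a single instance per classifier, so
 * changing an existing head fails with -EEXIST rather than replacing it.
 * The new head only becomes visible through tp->root once it is fully set
 * up (and offloaded, where requested).
 */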
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
			     flags, new->flags, extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

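/* Deleting the only filter empties the classifier; just mark the head so
 * a concurrent walk skips it and report that this was the last filter.
 * The actual teardown happens in mall_destroy().
 */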
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

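/* Walk the (at most one) filter, honouring the caller's skip/count
 * bookkeeping.
 */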
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

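/* Replay the rule to a single offload callback as it is added to or
 * removed from the block, keeping the in_hw bookkeeping consistent.
 * Mirrors the setup logic of mall_replace_hw_filter().
 */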
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);

		return add && tc_skip_sw(head->flags) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err)
		return err;

	return 0;
}

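/* Pull hardware counters from the drivers and fold them into the
 * filter's action statistics.
 */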
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
				 cls_mall.stats.pkts, cls_mall.stats.drops,
				 cls_mall.stats.lastused,
				 cls_mall.stats.used_hw_stats,
				 cls_mall.stats.used_hw_stats_valid);
}

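/* Dump the filter to netlink, summing the per-CPU software hit counters
 * into a single TCA_MATCHALL_PCNT attribute.
 */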
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

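/* Adjust the filter's class binding when the class it points to is
 * replaced (cl != 0) or removed.
 */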
static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &head->res, base);
		else
			__tcf_unbind_filter(q, &head->res);
	}
}

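/* Classifier operations registered with the TC core; "matchall" is the
 * kind string userspace selects.
 */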
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");