// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, more suitable for routing.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

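/* A key node is a single u32 filter: a selector (32-bit value/mask
 * pairs matched against packet data), the attached extensions and
 * actions, and an optional link down to a lower-level hash table.
 */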
struct tc_u_knode {
        struct tc_u_knode __rcu *next;
        u32 handle;
        struct tc_u_hnode __rcu *ht_up;
        struct tcf_exts exts;
        int ifindex;
        u8 fshift;
        struct tcf_result res;
        struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
        struct tc_u32_pcnt __percpu *pf;
#endif
        u32 flags;
        unsigned int in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
        u32 val;
        u32 mask;
        u32 __percpu *pcpu_success;
#endif
        struct rcu_work rwork;
        /* The 'sel' field MUST be the last field in this structure,
         * because it is variable-sized: sel.nkeys struct tc_u32_key
         * entries follow it in memory.
         */
        struct tc_u32_sel sel;
};

struct tc_u_hnode {
        struct tc_u_hnode __rcu *next;
        u32 handle;
        u32 prio;
        int refcnt;
        unsigned int divisor;
        struct idr handle_idr;
        bool is_root;
        struct rcu_head rcu;
        u32 flags;
        /* The 'ht' field MUST be the last field in this structure,
         * because the hash bucket array is allocated at the end of it.
         */
        struct tc_u_knode __rcu *ht[];
};

struct tc_u_common {
        struct tc_u_hnode __rcu *hlist;
        void *ptr;
        int refcnt;
        struct idr handle_idr;
        struct hlist_node hnode;
        long knodes;
};

static inline unsigned int u32_hash_fold(__be32 key,
                                         const struct tc_u32_sel *sel,
                                         u8 fshift)
{
        unsigned int h = ntohl(key & sel->hmask) >> fshift;

        return h;
}

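/* Classification walks the hash tables from the root.  Each key node's
 * value/mask pairs are compared against 32-bit words of packet data; on
 * a full match the walk either terminates with the node's result or
 * follows ht_down into a lower table, pushing the current position onto
 * a small stack (at most TC_U32_MAXDEPTH deep) so it can resume in the
 * upper table if the lower one yields no terminal match.
 */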
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                        struct tcf_result *res)
{
        struct {
                struct tc_u_knode *knode;
                unsigned int off;
        } stack[TC_U32_MAXDEPTH];

        struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
        int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
        int j;
#endif
        int i, r;

next_ht:
        n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
        if (n) {
                struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
                __this_cpu_inc(n->pf->rcnt);
                j = 0;
#endif

                if (tc_skip_sw(n->flags)) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

#ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mask) != n->val) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                } else {
                        __this_cpu_inc(*n->pcpu_success);
                }
#endif

                for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, hdata;

                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;

                        data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
                                n = rcu_dereference_bh(n->next);
                                goto next_knode;
                        }
#ifdef CONFIG_CLS_U32_PERF
                        __this_cpu_inc(n->pf->kcnts[j]);
                        j++;
#endif
                }

                ht = rcu_dereference_bh(n->ht_down);
                if (!ht) {
check_terminal:
                        if (n->sel.flags & TC_U32_TERMINAL) {

                                *res = n->res;
                                if (!tcf_match_indev(skb, n->ifindex)) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }
#ifdef CONFIG_CLS_U32_PERF
                                __this_cpu_inc(n->pf->rhit);
#endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }

                                return r;
                        }
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

                /* PUSH */
                if (sdepth >= TC_U32_MAXDEPTH)
                        goto deadloop;
                stack[sdepth].knode = n;
                stack[sdepth].off = off;
                sdepth++;

                ht = rcu_dereference_bh(n->ht_down);
                sel = 0;
                if (ht->divisor) {
                        __be32 *data, hdata;

                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
                                                  &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
                if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;

                if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
                                __be16 *data, hdata;

                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
                                                          2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
                                        n->sel.offshift;
                        }
                        off2 &= ~3;
                }
                if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }

                if (off < skb->len)
                        goto next_ht;
        }

        /* POP */
        if (sdepth--) {
                n = stack[sdepth].knode;
                ht = rcu_dereference_bh(n->ht_up);
                off = stack[sdepth].off;
                goto check_terminal;
        }
out:
        return -1;

deadloop:
        net_warn_ratelimited("cls_u32: dead loop\n");
        return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
        struct tc_u_hnode *ht;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next))
                if (ht->handle == handle)
                        break;

        return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
        unsigned int sel;
        struct tc_u_knode *n = NULL;

        sel = TC_U32_HASH(handle);
        if (sel > ht->divisor)
                goto out;

        for (n = rtnl_dereference(ht->ht[sel]);
             n;
             n = rtnl_dereference(n->next))
                if (n->handle == handle)
                        break;
out:
        return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
        struct tc_u_hnode *ht;
        struct tc_u_common *tp_c = tp->data;

        if (TC_U32_HTID(handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

        if (!ht)
                return NULL;

        if (TC_U32_KEY(handle) == 0)
                return ht;

        return u32_lookup_key(ht, handle);
}

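/* Protected by rtnl lock */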
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
        int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

        if (id < 0)
                return 0;
        return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
        struct tcf_block *block = tp->chain->block;

        /* The block sharing is currently supported only
         * for classless qdiscs. In that case we use block
         * for tc_u_common identification. In case the
         * block is not shared, block->q is a valid pointer
         * and we can use that. That works for classful qdiscs.
         */
        if (tcf_block_shared(block))
                return block;
        else
                return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
        return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
        struct tc_u_common *tc;

        hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
                if (tc->ptr == key)
                        return tc;
        }
        return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
        struct tc_u_hnode *root_ht;
        void *key = tc_u_common_ptr(tp);
        struct tc_u_common *tp_c = tc_u_common_find(key);

        root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
        if (root_ht == NULL)
                return -ENOBUFS;

        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
        root_ht->prio = tp->prio;
        root_ht->is_root = true;
        idr_init(&root_ht->handle_idr);

        if (tp_c == NULL) {
                tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
                }
                tp_c->ptr = key;
                INIT_HLIST_NODE(&tp_c->hnode);
                idr_init(&tp_c->handle_idr);

                hlist_add_head(&tp_c->hnode, tc_u_hash(key));
        }

        tp_c->refcnt++;
        RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
        rcu_assign_pointer(tp_c->hlist, root_ht);

        /* root_ht holds two references: one for tp_c->hlist, one for tp->root */
        root_ht->refcnt++;
        rcu_assign_pointer(tp->root, root_ht);
        tp->data = tp_c;
        return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

        tcf_exts_destroy(&n->exts);
        if (ht && --ht->refcnt == 0)
                kfree(ht);
        kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
        tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
        if (free_pf)
                free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
        if (free_pf)
                free_percpu(n->pcpu_success);
#endif
        __u32_destroy_key(n);
}

/* u32_delete_key_work() frees a copied tc_u_knode obtained from
 * u32_init_knode().  Copies share their percpu statistics with the
 * original node so readers can keep updating them while the copy
 * replaces the original, which is why the percpu counters must not
 * be freed here (free_pf == false).
 */
static void u32_delete_key_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key, false);
        rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the variant that also frees the
 * percpu statistics (free_pf == true).  Only use it for keys that are
 * not copies returned by u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key, true);
        rtnl_unlock();
}

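/* Unlink a key node from its hash bucket and queue RCU-deferred
 * destruction of the node together with its percpu statistics.
 * Called under the rtnl lock.
 */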
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode __rcu **kp;
        struct tc_u_knode *pkp;
        struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

        if (ht) {
                kp = &ht->ht[TC_U32_HASH(key->handle)];
                for (pkp = rtnl_dereference(*kp); pkp;
                     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
                        if (pkp == key) {
                                RCU_INIT_POINTER(*kp, key->next);
                                tp_c->knodes--;

                                tcf_unbind_filter(tp, &key->res);
                                idr_remove(&ht->handle_idr, key->handle);
                                tcf_exts_get_net(&key->exts);
                                tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
                                return 0;
                        }
                }
        }
        WARN_ON(1);
        return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        bool offloaded = false;
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_NEW_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
        if (err < 0) {
                u32_clear_hw_hnode(tp, h, NULL);
                return err;
        } else if (err > 0) {
                offloaded = true;
        }

        if (skip_sw && !offloaded)
                return -EINVAL;

        return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_KNODE;
        cls_u32.knode.handle = n->handle;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
                            &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_REPLACE_KNODE;
        cls_u32.knode.handle = n->handle;
        cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
        cls_u32.knode.val = n->val;
        cls_u32.knode.mask = n->mask;
#else
        cls_u32.knode.val = 0;
        cls_u32.knode.mask = 0;
#endif
        cls_u32.knode.sel = &n->sel;
        cls_u32.knode.res = &n->res;
        cls_u32.knode.exts = &n->exts;
        if (n->ht_down)
                cls_u32.knode.link_handle = ht->handle;

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
                              &n->flags, &n->in_hw_count, true);
        if (err) {
                u32_remove_hw_knode(tp, n, NULL);
                return err;
        }

        if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                            struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_knode *n;
        unsigned int h;

        for (h = 0; h <= ht->divisor; h++) {
                while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
                        RCU_INIT_POINTER(ht->ht[h],
                                         rtnl_dereference(n->next));
                        tp_c->knodes--;
                        tcf_unbind_filter(tp, &n->res);
                        u32_remove_hw_knode(tp, n, extack);
                        idr_remove(&ht->handle_idr, n->handle);
                        if (tcf_exts_get_net(&n->exts))
                                tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
                        else
                                u32_destroy_key(n, true);
                }
        }
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                             struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode __rcu **hn;
        struct tc_u_hnode *phn;

        WARN_ON(--ht->refcnt);

        u32_clear_hnode(tp, ht, extack);

        hn = &tp_c->hlist;
        for (phn = rtnl_dereference(*hn);
             phn;
             hn = &phn->next, phn = rtnl_dereference(*hn)) {
                if (phn == ht) {
                        u32_clear_hw_hnode(tp, ht, extack);
                        idr_destroy(&ht->handle_idr);
                        idr_remove(&tp_c->handle_idr, ht->handle);
                        RCU_INIT_POINTER(*hn, ht->next);
                        kfree_rcu(ht, rcu);
                        return 0;
                }
        }

        return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
                        struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

        WARN_ON(root_ht == NULL);

        if (root_ht && --root_ht->refcnt == 1)
                u32_destroy_hnode(tp, root_ht, extack);

        if (--tp_c->refcnt == 0) {
                struct tc_u_hnode *ht;

                hlist_del(&tp_c->hnode);

                while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
                        u32_clear_hnode(tp, ht, extack);
                        RCU_INIT_POINTER(tp_c->hlist, ht->next);

                        /* u32_destroy_key() will later free ht for us, if
                         * it's still referenced by some knode
                         */
                        if (--ht->refcnt == 0)
                                kfree_rcu(ht, rcu);
                }

                idr_destroy(&tp_c->handle_idr);
                kfree(tp_c);
        }

        tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
                      bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = arg;
        struct tc_u_common *tp_c = tp->data;
        int ret = 0;

        if (TC_U32_KEY(ht->handle)) {
                u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
                ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
                goto out;
        }

        if (ht->is_root) {
                NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
                return -EINVAL;
        }

        if (ht->refcnt == 1) {
                u32_destroy_hnode(tp, ht, extack);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
                return -EBUSY;
        }

out:
        *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
        return ret;
}

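/* Handles are laid out as htid | node-id (TC_U32_NODE is the low 12
 * bits).  Try node ids 0x800-0xFFF first, then fall back to the low
 * range, and finally to the maximum if the table is full.
 */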
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
        u32 index = htid | 0x800;
        u32 max = htid | 0xFFF;

        if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
                index = htid + 1;
                if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
                                  GFP_KERNEL))
                        index = max;
        }

        return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
        [TCA_U32_CLASSID] = { .type = NLA_U32 },
        [TCA_U32_HASH] = { .type = NLA_U32 },
        [TCA_U32_LINK] = { .type = NLA_U32 },
        [TCA_U32_DIVISOR] = { .type = NLA_U32 },
        [TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
        [TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
        [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
        [TCA_U32_FLAGS] = { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base,
                         struct tc_u_knode *n, struct nlattr **tb,
                         struct nlattr *est, u32 flags, u32 fl_flags,
                         struct netlink_ext_ack *extack)
{
        int err;

        err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
                                   fl_flags, extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_LINK]) {
                u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
                struct tc_u_hnode *ht_down = NULL, *ht_old;

                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
                        return -EINVAL;
                }

                if (handle) {
                        ht_down = u32_lookup_ht(tp->data, handle);

                        if (!ht_down) {
                                NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
                                return -EINVAL;
                        }
                        if (ht_down->is_root) {
                                NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
                                return -EINVAL;
                        }
                        ht_down->refcnt++;
                }

                ht_old = rtnl_dereference(n->ht_down);
                rcu_assign_pointer(n->ht_down, ht_down);

                if (ht_old)
                        ht_old->refcnt--;
        }
        if (tb[TCA_U32_CLASSID]) {
                n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
                tcf_bind_filter(tp, &n->res, base);
        }

        if (tb[TCA_U32_INDEV]) {
                int ret;

                ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
                if (ret < 0)
                        return -EINVAL;
                n->ifindex = ret;
        }
        return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
                              struct tc_u_knode *n)
{
        struct tc_u_knode __rcu **ins;
        struct tc_u_knode *pins;
        struct tc_u_hnode *ht;

        if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

        ins = &ht->ht[TC_U32_HASH(n->handle)];

        /* The node must always exist for it to be replaced; if this is
         * not the case then something went very wrong elsewhere.
         */
        for (pins = rtnl_dereference(*ins); ;
             ins = &pins->next, pins = rtnl_dereference(*ins))
                if (pins->handle == n->handle)
                        break;

        idr_replace(&ht->handle_idr, n, n->handle);
        RCU_INIT_POINTER(n->next, pins->next);
        rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
                                         struct tc_u_knode *n)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tc_u32_sel *s = &n->sel;
        struct tc_u_knode *new;

        new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
        if (!new)
                return NULL;

        RCU_INIT_POINTER(new->next, n->next);
        new->handle = n->handle;
        RCU_INIT_POINTER(new->ht_up, n->ht_up);

        new->ifindex = n->ifindex;
        new->fshift = n->fshift;
        new->res = n->res;
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
        /* Statistics may be incremented by readers during update
         * so we must keep them intact. When the node is later destroyed
         * a special destroy call must be made to not free the pf memory.
         */
        new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
        new->val = n->val;
        new->mask = n->mask;
        /* Similarly success statistics must be moved as pointers */
        new->pcpu_success = n->pcpu_success;
#endif
        memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

        if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
                kfree(new);
                return NULL;
        }

        /* bump reference count as long as we hold pointer to structure */
        if (ht)
                ht->refcnt++;

        return new;
}

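/* Create or update a filter.  Depending on the attributes this either
 * replaces an existing key node (through a copy, so concurrent readers
 * always see a consistent node), creates a new hash table
 * (TCA_U32_DIVISOR), or allocates a new key node and links it into the
 * selected table.
 */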
static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca, void **arg, u32 flags,
                      struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        struct tc_u32_sel *s;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid, userflags = 0;
        size_t sel_size;
        int err;

        if (!opt) {
                if (handle) {
                        NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
                                          extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_FLAGS]) {
                userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(userflags)) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
                        return -EINVAL;
                }
        }

        n = *arg;
        if (n) {
                struct tc_u_knode *new;

                if (TC_U32_KEY(n->handle) == 0) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
                        return -EINVAL;
                }

                if ((n->flags ^ userflags) &
                    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
                        return -EINVAL;
                }

                new = u32_init_knode(net, tp, n);
                if (!new)
                        return -ENOMEM;

                err = u32_set_parms(net, tp, base, new, tb,
                                    tca[TCA_RATE], flags, new->flags,
                                    extack);

                if (err) {
                        __u32_destroy_key(new);
                        return err;
                }

                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
                        __u32_destroy_key(new);
                        return err;
                }

                if (!tc_in_hw(new->flags))
                        new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                u32_replace_knode(tp, tp_c, new);
                tcf_unbind_filter(tp, &n->res);
                tcf_exts_get_net(&n->exts);
                tcf_queue_work(&n->rwork, u32_delete_key_work);
                return 0;
        }

        if (tb[TCA_U32_DIVISOR]) {
                unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

                if (!is_power_of_2(divisor)) {
                        NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
                        return -EINVAL;
                }
                if (divisor-- > 0x100) {
                        NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
                        return -EINVAL;
                }
                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
                        return -EINVAL;
                }
                ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                if (handle == 0) {
                        handle = gen_new_htid(tp->data, ht);
                        if (handle == 0) {
                                kfree(ht);
                                return -ENOMEM;
                        }
                } else {
                        err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
                                            handle, GFP_KERNEL);
                        if (err) {
                                kfree(ht);
                                return err;
                        }
                }
                ht->refcnt = 1;
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
                idr_init(&ht->handle_idr);
                ht->flags = userflags;

                err = u32_replace_hw_hnode(tp, ht, userflags, extack);
                if (err) {
                        idr_remove(&tp_c->handle_idr, handle);
                        kfree(ht);
                        return err;
                }

                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = ht;

                return 0;
        }

        if (tb[TCA_U32_HASH]) {
                htid = nla_get_u32(tb[TCA_U32_HASH]);
                if (TC_U32_HTID(htid) == TC_U32_ROOT) {
                        ht = rtnl_dereference(tp->root);
                        htid = ht->handle;
                } else {
                        ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
                        if (!ht) {
                                NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
                                return -EINVAL;
                        }
                }
        } else {
                ht = rtnl_dereference(tp->root);
                htid = ht->handle;
        }

        if (ht->divisor < TC_U32_HASH(htid)) {
                NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
                return -EINVAL;
        }

        if (handle) {
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
                        NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
                        return -EINVAL;
                }
                handle = htid | TC_U32_NODE(handle);
                err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
                                    GFP_KERNEL);
                if (err)
                        return err;
        } else
                handle = gen_new_kid(ht, htid);

        if (tb[TCA_U32_SEL] == NULL) {
                NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
                err = -EINVAL;
                goto erridr;
        }

        s = nla_data(tb[TCA_U32_SEL]);
        sel_size = struct_size(s, keys, s->nkeys);
        if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
                err = -EINVAL;
                goto erridr;
        }

        n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
        if (n == NULL) {
                err = -ENOBUFS;
                goto erridr;
        }

#ifdef CONFIG_CLS_U32_PERF
        n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
                               __alignof__(struct tc_u32_pcnt));
        if (!n->pf) {
                err = -ENOBUFS;
                goto errfree;
        }
#endif

        memcpy(&n->sel, s, sel_size);
        RCU_INIT_POINTER(n->ht_up, ht);
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
        n->flags = userflags;

        err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
        if (err < 0)
                goto errout;

#ifdef CONFIG_CLS_U32_MARK
        n->pcpu_success = alloc_percpu(u32);
        if (!n->pcpu_success) {
                err = -ENOMEM;
                goto errout;
        }

        if (tb[TCA_U32_MARK]) {
                struct tc_u32_mark *mark;

                mark = nla_data(tb[TCA_U32_MARK]);
                n->val = mark->val;
                n->mask = mark->mask;
        }
#endif

        err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
                            flags, n->flags, extack);
        if (err == 0) {
                struct tc_u_knode __rcu **ins;
                struct tc_u_knode *pins;

                err = u32_replace_hw_knode(tp, n, flags, extack);
                if (err)
                        goto errhw;

                if (!tc_in_hw(n->flags))
                        n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                ins = &ht->ht[TC_U32_HASH(handle)];
                for (pins = rtnl_dereference(*ins); pins;
                     ins = &pins->next, pins = rtnl_dereference(*ins))
                        if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
                                break;

                RCU_INIT_POINTER(n->next, pins);
                rcu_assign_pointer(*ins, n);
                tp_c->knodes++;
                *arg = n;
                return 0;
        }

errhw:
#ifdef CONFIG_CLS_U32_MARK
        free_percpu(n->pcpu_success);
#endif

errout:
        tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
        free_percpu(n->pf);
#endif
        kfree(n);
erridr:
        idr_remove(&ht->handle_idr, handle);
        return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                     bool rtnl_held)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;

        if (arg->stop)
                return;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next)) {
                if (ht->prio != tp->prio)
                        continue;
                if (arg->count >= arg->skip) {
                        if (arg->fn(tp, ht, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                }
                arg->count++;
                for (h = 0; h <= ht->divisor; h++) {
                        for (n = rtnl_dereference(ht->ht[h]);
                             n;
                             n = rtnl_dereference(n->next)) {
                                if (arg->count < arg->skip) {
                                        arg->count++;
                                        continue;
                                }
                                if (arg->fn(tp, n, arg) < 0) {
                                        arg->stop = 1;
                                        return;
                                }
                                arg->count++;
                        }
                }
        }
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
{
        struct tc_cls_u32_offload cls_u32 = {};
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
        cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
        cls_u32.hnode.divisor = ht->divisor;
        cls_u32.hnode.handle = ht->handle;
        cls_u32.hnode.prio = ht->prio;

        err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
        if (err && add && tc_skip_sw(ht->flags))
                return err;

        return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                               bool add, flow_setup_cb_t *cb, void *cb_priv,
                               struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
        cls_u32.command = add ?
                TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
        cls_u32.knode.handle = n->handle;

        if (add) {
                cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
                cls_u32.knode.val = n->val;
                cls_u32.knode.mask = n->mask;
#else
                cls_u32.knode.val = 0;
                cls_u32.knode.mask = 0;
#endif
                cls_u32.knode.sel = &n->sel;
                cls_u32.knode.res = &n->res;
                cls_u32.knode.exts = &n->exts;
                if (n->ht_down)
                        cls_u32.knode.link_handle = ht->handle;
        }

        return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
                                     &cls_u32, cb_priv, &n->flags,
                                     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                         void *cb_priv, struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned int h;
        int err;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next)) {
                if (ht->prio != tp->prio)
                        continue;

                /* When adding filters to a new dev, try to offload the
                 * hashtable first. When removing, do the filters before the
                 * hashtable.
                 */
                if (add && !tc_skip_hw(ht->flags)) {
                        err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
                                                  extack);
                        if (err)
                                return err;
                }

                for (h = 0; h <= ht->divisor; h++) {
                        for (n = rtnl_dereference(ht->ht[h]);
                             n;
                             n = rtnl_dereference(n->next)) {
                                if (tc_skip_hw(n->flags))
                                        continue;

                                err = u32_reoffload_knode(tp, n, add, cb,
                                                          cb_priv, extack);
                                if (err)
                                        return err;
                        }
                }

                if (!add && !tc_skip_hw(ht->flags))
                        u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
        }

        return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
                           unsigned long base)
{
        struct tc_u_knode *n = fh;

        if (n && n->res.classid == classid) {
                if (cl)
                        __tcf_bind_filter(q, &n->res, base);
                else
                        __tcf_unbind_filter(q, &n->res);
        }
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
                    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct tc_u_knode *n = fh;
        struct tc_u_hnode *ht_up, *ht_down;
        struct nlattr *nest;

        if (n == NULL)
                return skb->len;

        t->tcm_handle = n->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (TC_U32_KEY(n->handle) == 0) {
                struct tc_u_hnode *ht = fh;
                u32 divisor = ht->divisor + 1;

                if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
                        goto nla_put_failure;
        } else {
#ifdef CONFIG_CLS_U32_PERF
                struct tc_u32_pcnt *gpf;
                int cpu;
#endif

                if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
                            &n->sel))
                        goto nla_put_failure;

                ht_up = rtnl_dereference(n->ht_up);
                if (ht_up) {
                        u32 htid = n->handle & 0xFFFFF000;

                        if (nla_put_u32(skb, TCA_U32_HASH, htid))
                                goto nla_put_failure;
                }
                if (n->res.classid &&
                    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
                        goto nla_put_failure;

                ht_down = rtnl_dereference(n->ht_down);
                if (ht_down &&
                    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
                        goto nla_put_failure;

                if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
                        goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
                if ((n->val || n->mask)) {
                        struct tc_u32_mark mark = {.val = n->val,
                                                   .mask = n->mask,
                                                   .success = 0};
                        int cpum;

                        for_each_possible_cpu(cpum) {
                                __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

                                mark.success += cnt;
                        }

                        if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
                                goto nla_put_failure;
                }
#endif

                if (tcf_exts_dump(skb, &n->exts) < 0)
                        goto nla_put_failure;

                if (n->ifindex) {
                        struct net_device *dev;

                        dev = __dev_get_by_index(net, n->ifindex);
                        if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
                                goto nla_put_failure;
                }
#ifdef CONFIG_CLS_U32_PERF
                gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
                if (!gpf)
                        goto nla_put_failure;

                for_each_possible_cpu(cpu) {
                        int i;
                        struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

                        gpf->rcnt += pf->rcnt;
                        gpf->rhit += pf->rhit;
                        for (i = 0; i < n->sel.nkeys; i++)
                                gpf->kcnts[i] += pf->kcnts[i];
                }

                if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
                                  gpf, TCA_U32_PAD)) {
                        kfree(gpf);
                        goto nla_put_failure;
                }
                kfree(gpf);
#endif
        }

        nla_nest_end(skb, nest);

        if (TC_U32_KEY(n->handle))
                if (tcf_exts_dump_stats(skb, &n->exts) < 0)
                        goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
        .kind = "u32",
        .classify = u32_classify,
        .init = u32_init,
        .destroy = u32_destroy,
        .get = u32_get,
        .change = u32_change,
        .delete = u32_delete,
        .walk = u32_walk,
        .reoffload = u32_reoffload,
        .dump = u32_dump,
        .bind_class = u32_bind_class,
        .owner = THIS_MODULE,
};

static int __init init_u32(void)
{
        int i, ret;

        pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
        pr_info("    Performance counters on\n");
#endif
        pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
        pr_info("    Actions configured\n");
#endif
        tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
                                          sizeof(struct hlist_head),
                                          GFP_KERNEL);
        if (!tc_u_common_hash)
                return -ENOMEM;

        for (i = 0; i < U32_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&tc_u_common_hash[i]);

        ret = register_tcf_proto_ops(&cls_u32_ops);
        if (ret)
                kvfree(tc_u_common_hash);
        return ret;
}

static void __exit exit_u32(void)
{
        unregister_tcf_proto_ops(&cls_u32_ops);
        kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");