// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * Design notes:
 *
 * 1. Route tags are assumed to be < 256, which allows direct table
 *    lookups instead of hashing.
 * 2. "from TAG" and "fromif DEV" statements are assumed to be
 *    mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM hash slots + 16 IIF hash slots + 1 wildcard slot */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};
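
/*
 * A filter handle is a 32-bit value whose layout follows from to_hash(),
 * from_hash() and route4_set_parms() below:
 *
 *   bits  0- 7: destination realm ("to TAG"); only valid with bit 15 clear
 *   bit     15: set when the filter does not match a destination realm
 *   bits 16-31: source key: a source realm ("from TAG"), an input
 *               interface index with 0x8000 or'ed in ("fromif DEV"), or
 *               0xFFFF for "from ANY"
 *
 * For example, "to 2 from 10" encodes as 0x000a0002 and a plain
 * "from 10" as 0x000a8000.
 */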

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

/* Only the low nibble of the destination realm feeds the fastmap hash;
 * iif is deliberately ignored here, so two flows may share a slot and
 * simply evict each other.
 */
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic so id, iif and filter agree */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
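
/*
 * Worked lookup example: a packet whose route was tagged with
 * tclassid 0x000a0002 (from realm 10, to realm 2) selects bucket
 * head->table[route4_hash_to(id)] = table[2] and is matched against
 * chain ht[route4_hash_from(id)] = ht[10], then against the interface
 * chain ht[route4_hash_iif(iif)], and finally against the wildcard
 * chain ht[32].
 */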

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
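
/*
 * ROUTE4_APPLY_RESULT() expands inside the matching loops of
 * route4_classify(): a negative verdict from tcf_exts_exec() sets
 * dont_cache and "continue"s to the next filter in the chain, while any
 * other outcome returns immediately. Matches without actions are cached
 * in the fastmap unless dont_cache was set earlier.
 */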

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	/* Check the fastmap cache first. */
	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		/* No match in the "to TAG" bucket; retry in the "to ANY"
		 * bucket with the destination realm masked off.
		 */
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}
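
/*
 * Note that misses are cached as well: route4_classify() stores
 * ROUTE4_FAILURE in the fastmap so repeated lookups for the same
 * (id, iif) pair fail fast until the entry is overwritten or the
 * fastmap is reset.
 */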

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
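
/*
 * Worked handle example: the "to 2 from 10" handle 0x000a0002 gives
 * to_hash(0x000a0002) = 2 and from_hash(0x000a0002 >> 16) = 10, i.e.
 * head->table[2], chain ht[10]. A pure "from 10" handle 0x000a8000
 * gives to_hash() = 0 + 256 = 256, the "to ANY" bucket. The 256
 * returned by from_hash() for an out-of-range source realm is a
 * sentinel that callers reject with their "> 32" checks.
 */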

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}
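
/*
 * Teardown ordering: filters whose extensions hold a netns reference are
 * freed through tcf_queue_work(), so tcf_exts_destroy() runs under RTNL
 * after an RCU grace period; buckets and the head go through kfree_rcu()
 * because route4_classify() may still be traversing them under RCU.
 */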

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* Unlink it from the chain. */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Clear any fastmap entries that might still
			 * reference the filter; it is unlinked above, so it
			 * cannot re-enter the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it. */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Check whether the bucket still holds any filter. */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* Bucket is empty: unlink it and free it via RCU. */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	/* The classifier is empty iff no bucket remains. */
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
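
/*
 * For orientation, a hedged userspace example (iproute2 syntax, not
 * defined by this file): a command along the lines of
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 10 classid 1:10
 *
 * would be expected to reach route4_change() with a TCA_ROUTE4_FROM
 * attribute of 10 plus a TCA_ROUTE4_CLASSID attribute, which
 * route4_set_parms() below folds into the filter's handle and tcf_result.
 */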

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    u32 flags, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	if (!nhandle) {
		NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
		return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
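
/*
 * nhandle construction, worked through: for "to 2 from 10", nhandle
 * starts at 0x8000, TCA_ROUTE4_TO overwrites it with 2, and
 * TCA_ROUTE4_FROM or's in 10 << 16, giving 0x000a0002. With neither
 * FROM nor IIF present the source key becomes the 0xFFFF wildcard, so a
 * plain "to 2" yields 0xffff0002.
 */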

static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (!handle) {
		NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
		return -EINVAL;
	}

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
					  route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, flags, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");