// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/em_meta.c	Metadata ematch
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * The metadata ematch compares two meta objects. Each object either
 * carries a constant value supplied by userspace or a lookup handle
 * (TCF_META_ID_*) that is resolved at match time by one of the value
 * collectors below, using the skb, its device, its route or its
 * owning socket as the source of the data.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

/* Result of evaluating one side of a match: an integer value, or a
 * pointer/length pair for variable length data.
 */
struct meta_obj {
        unsigned long value;
        unsigned int len;
};

/* One side of a match as configured by userspace: the meta header
 * (kind, shift, op) plus an optional immediate value.
 */
struct meta_value {
        struct tcf_meta_val hdr;
        unsigned long val;
        unsigned int len;
};

struct meta_match {
        struct meta_value lvalue;
        struct meta_value rvalue;
};

static inline int meta_id(struct meta_value *v)
{
        return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
        return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
        struct tcf_pkt_info *info, struct meta_value *v, \
        struct meta_obj *dst, int *err)
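
/* Each META_COLLECTOR() below resolves one TCF_META_ID_* handle: it fills
 * dst->value (and dst->len for variable length data) from the skb, its
 * device, route or owning socket, or reports failure through *err.
 */

/**************************************************************************
 * System status & misc
 **************************************************************************/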

META_COLLECTOR(int_random)
{
        get_random_bytes(&dst->value, sizeof(dst->value));
}

/* avenrun[] is fixed point with FSHIFT fractional bits; convert it to an
 * integer scaled by 100, rounded to the nearest hundredth.
 */
static inline unsigned long fixed_loadavg(int load)
{
        int rnd_load = load + (FIXED_1/200);
        int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

        return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
        dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
        dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
        dst->value = fixed_loadavg(avenrun[2]);
}
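
/**************************************************************************
 * Device names & indices
 **************************************************************************/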

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = dev->ifindex;
        return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
        if (unlikely(dev == NULL))
                return -1;

        dst->value = (unsigned long) dev->name;
        dst->len = strlen(dev->name);
        return 0;
}

META_COLLECTOR(int_dev)
{
        *err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
        *err = var_dev(skb->dev, dst);
}
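
/**************************************************************************
 * vlan tag
 **************************************************************************/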

META_COLLECTOR(int_vlan_tag)
{
        unsigned short tag;

        if (skb_vlan_tag_present(skb))
                dst->value = skb_vlan_tag_get(skb);
        else if (!__vlan_get_tag(skb, &tag))
                dst->value = tag;
        else
                *err = -1;
}
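
/**************************************************************************
 * skb attributes
 **************************************************************************/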

META_COLLECTOR(int_priority)
{
        dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
        /* Let userspace take care of the byte ordering */
        dst->value = skb_protocol(skb, false);
}

META_COLLECTOR(int_pkttype)
{
        dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
        dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
        dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
        dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
        dst->value = skb_get_hash(skb);
}
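
/**************************************************************************
 * Netfilter
 **************************************************************************/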

META_COLLECTOR(int_mark)
{
        dst->value = skb->mark;
}
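
/**************************************************************************
 * Traffic control
 **************************************************************************/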

META_COLLECTOR(int_tcindex)
{
        dst->value = skb->tc_index;
}
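
/**************************************************************************
 * Routing
 **************************************************************************/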

META_COLLECTOR(int_rtclassid)
{
        if (unlikely(skb_dst(skb) == NULL))
                *err = -1;
        else
#ifdef CONFIG_IP_ROUTE_CLASSID
                dst->value = skb_dst(skb)->tclassid;
#else
                dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
        if (unlikely(skb_rtable(skb) == NULL))
                *err = -1;
        else
                dst->value = inet_iif(skb);
}
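
/**************************************************************************
 * Socket Attributes
 **************************************************************************/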

/* Socket collectors can only report on packets that are associated with
 * a local socket; everything else fails with *err = -1.
 */
#define skip_nonlocal(skb) \
        (unlikely(skb->sk == NULL))

META_COLLECTOR(int_sk_family)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }

        dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
        int bound_dev_if;

        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }

        bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
        if (bound_dev_if == 0) {
                dst->value = (unsigned long) "any";
                dst->len = 3;
        } else {
                struct net_device *dev;

                rcu_read_lock();
                dev = dev_get_by_index_rcu(sock_net(skb->sk),
                                           bound_dev_if);
                *err = var_dev(dev, dst);
                rcu_read_unlock();
        }
}

META_COLLECTOR(int_sk_refcnt)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = refcount_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = READ_ONCE(sk->sk_wmem_queued);
}

META_COLLECTOR(int_sk_fwd_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk_forward_alloc_get(sk);
}

META_COLLECTOR(int_sk_sndbuf)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
        if (skip_nonlocal(skb)) {
                *err = -1;
                return;
        }
        dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = READ_ONCE(sk->sk_ack_backlog);
}

META_COLLECTOR(int_sk_max_ack_bl)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = READ_ONCE(sk->sk_max_ack_backlog);
}

META_COLLECTOR(int_sk_prio)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = READ_ONCE(sk->sk_rcvlowat);
}

META_COLLECTOR(int_sk_rcvtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
        const struct sock *sk = skb_to_full_sk(skb);

        if (!sk) {
                *err = -1;
                return;
        }
        dst->value = sk->sk_write_pending;
}
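
/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/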

struct meta_ops {
        void (*get)(struct sk_buff *, struct tcf_pkt_info *,
                    struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                [META_ID(DEV)] = META_FUNC(var_dev),
                [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
        },
        [TCF_META_TYPE_INT] = {
                [META_ID(RANDOM)] = META_FUNC(int_random),
                [META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0),
                [META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
                [META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
                [META_ID(DEV)] = META_FUNC(int_dev),
                [META_ID(PRIORITY)] = META_FUNC(int_priority),
                [META_ID(PROTOCOL)] = META_FUNC(int_protocol),
                [META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
                [META_ID(PKTLEN)] = META_FUNC(int_pktlen),
                [META_ID(DATALEN)] = META_FUNC(int_datalen),
                [META_ID(MACLEN)] = META_FUNC(int_maclen),
                [META_ID(NFMARK)] = META_FUNC(int_mark),
                [META_ID(TCINDEX)] = META_FUNC(int_tcindex),
                [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
                [META_ID(RTIIF)] = META_FUNC(int_rtiif),
                [META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
                [META_ID(SK_STATE)] = META_FUNC(int_sk_state),
                [META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse),
                [META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if),
                [META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt),
                [META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf),
                [META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf),
                [META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown),
                [META_ID(SK_PROTO)] = META_FUNC(int_sk_proto),
                [META_ID(SK_TYPE)] = META_FUNC(int_sk_type),
                [META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc),
                [META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc),
                [META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc),
                [META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued),
                [META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen),
                [META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen),
                [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
                [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
                [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
                [META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
                [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
                [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
                [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
                [META_ID(SK_PRIO)] = META_FUNC(int_sk_prio),
                [META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat),
                [META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo),
                [META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
                [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
                [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
                [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
                [META_ID(RXHASH)] = META_FUNC(int_rxhash),
        }
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
        return &__meta_ops[meta_type(val)][meta_id(val)];
}
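
/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/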

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
        int r = a->len - b->len;

        if (r == 0)
                r = memcmp((void *) a->value, (void *) b->value, a->len);

        return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
        int len = nla_len(nla);

        dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
        if (dst->val == 0UL)
                return -ENOMEM;
        dst->len = len;
        return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
        kfree((void *) v->val);
}

static void meta_var_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        int shift = v->hdr.shift;

        if (shift && shift < dst->len)
                dst->len -= shift;
}

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->val && v->len &&
            nla_put(skb, tlv, v->len, (void *) v->val))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
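
/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/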

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
        /* Let gcc optimize it, the unlikely is not really based on
         * measurements but jump free code for mismatches seems more
         * logical.
         */
        if (unlikely(a->value == b->value))
                return 0;
        else if (a->value < b->value)
                return -1;
        else
                return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
        if (nla_len(nla) >= sizeof(unsigned long)) {
                dst->val = *(unsigned long *) nla_data(nla);
                dst->len = sizeof(unsigned long);
        } else if (nla_len(nla) == sizeof(u32)) {
                dst->val = nla_get_u32(nla);
                dst->len = sizeof(u32);
        } else
                return -EINVAL;

        return 0;
}

/* The optional shift and mask (v->val) extras are applied to collected
 * integer values before the comparison takes place.
 */
static void meta_int_apply_extras(struct meta_value *v,
                                  struct meta_obj *dst)
{
        if (v->hdr.shift)
                dst->value >>= v->hdr.shift;

        if (v->val)
                dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
        if (v->len == sizeof(unsigned long)) {
                if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
                        goto nla_put_failure;
        } else if (v->len == sizeof(u32)) {
                if (nla_put_u32(skb, tlv, v->val))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -1;
}
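
/**************************************************************************
 * Generic type handlers
 **************************************************************************/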

struct meta_type_ops {
        void (*destroy)(struct meta_value *);
        int (*compare)(struct meta_obj *, struct meta_obj *);
        int (*change)(struct meta_value *, struct nlattr *);
        void (*apply_extras)(struct meta_value *, struct meta_obj *);
        int (*dump)(struct sk_buff *, struct meta_value *, int);
};

static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
        [TCF_META_TYPE_VAR] = {
                .destroy = meta_var_destroy,
                .compare = meta_var_compare,
                .change = meta_var_change,
                .apply_extras = meta_var_apply_extras,
                .dump = meta_var_dump
        },
        [TCF_META_TYPE_INT] = {
                .compare = meta_int_compare,
                .change = meta_int_change,
                .apply_extras = meta_int_apply_extras,
                .dump = meta_int_dump
        }
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
        return &__meta_type_ops[meta_type(v)];
}
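
/**************************************************************************
 * Core
 **************************************************************************/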

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
                    struct meta_value *v, struct meta_obj *dst)
{
        int err = 0;

        /* TCF_META_ID_VALUE denotes a constant supplied by userspace;
         * everything else is resolved through a collector.
         */
        if (meta_id(v) == TCF_META_ID_VALUE) {
                dst->value = v->val;
                dst->len = v->len;
                return 0;
        }

        meta_ops(v)->get(skb, info, v, dst, &err);
        if (err < 0)
                return err;

        if (meta_type_ops(v)->apply_extras)
                meta_type_ops(v)->apply_extras(v, dst);

        return 0;
}

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
                         struct tcf_pkt_info *info)
{
        int r;
        struct meta_match *meta = (struct meta_match *) m->data;
        struct meta_obj l_value, r_value;

        /* If either side cannot be collected, the ematch does not match. */
        if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
            meta_get(skb, info, &meta->rvalue, &r_value) < 0)
                return 0;

        r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

        switch (meta->lvalue.hdr.op) {
        case TCF_EM_OPND_EQ:
                return !r;
        case TCF_EM_OPND_LT:
                return r < 0;
        case TCF_EM_OPND_GT:
                return r > 0;
        }

        return 0;
}

static void meta_delete(struct meta_match *meta)
{
        if (meta) {
                const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

                if (ops && ops->destroy) {
                        ops->destroy(&meta->lvalue);
                        ops->destroy(&meta->rvalue);
                }
        }

        kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
        if (nla) {
                if (nla_len(nla) == 0)
                        return -EINVAL;

                return meta_type_ops(dst)->change(dst, nla);
        }

        return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
        return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
        [TCA_EM_META_HDR] = { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct net *net, void *data, int len,
                          struct tcf_ematch *m)
{
        int err;
        struct nlattr *tb[TCA_EM_META_MAX + 1];
        struct tcf_meta_hdr *hdr;
        struct meta_match *meta = NULL;

        err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
                                   meta_policy, NULL);
        if (err < 0)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_EM_META_HDR] == NULL)
                goto errout;
        hdr = nla_data(tb[TCA_EM_META_HDR]);

        if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
            TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
            TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
            TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
                goto errout;

        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
        if (meta == NULL) {
                err = -ENOMEM;
                goto errout;
        }

        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

        if (!meta_is_supported(&meta->lvalue) ||
            !meta_is_supported(&meta->rvalue)) {
                err = -EOPNOTSUPP;
                goto errout;
        }

        if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
            meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
                goto errout;

        m->datalen = sizeof(*meta);
        m->data = (unsigned long) meta;

        err = 0;
errout:
        if (err && meta)
                meta_delete(meta);
        return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
        if (m)
                meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
        struct meta_match *meta = (struct meta_match *) em->data;
        struct tcf_meta_hdr hdr;
        const struct meta_type_ops *ops;

        memset(&hdr, 0, sizeof(hdr));
        memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
        memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

        if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
                goto nla_put_failure;

        ops = meta_type_ops(&meta->lvalue);
        if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
            ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
        .kind = TCF_EM_META,
        .change = em_meta_change,
        .match = em_meta_match,
        .destroy = em_meta_destroy,
        .dump = em_meta_dump,
        .owner = THIS_MODULE,
        .link = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
        return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
        tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);
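
/*
 * Example (illustrative only; the exact iproute2 syntax may differ by
 * version):
 *
 *	tc filter add dev eth0 parent 1: protocol ip basic \
 *		match 'meta(priority eq 6)' classid 1:1
 *
 * This matches packets whose skb->priority equals 6, resolved through
 * the int_priority collector above.
 */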