// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c     Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */
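
/*
 * Illustrative userspace configuration (tc-flow(8) syntax; the device,
 * handles and numbers here are examples only, not part of this file):
 *
 *   tc filter add dev eth0 parent 1: handle 1 protocol ip \
 *           flow hash keys src,dst,proto,proto-src,proto-dst \
 *           divisor 1024 perturb 60
 */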

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
    struct list_head    filters;
    struct rcu_head     rcu;
};

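/*
 * One configured filter.  @keymask selects the FLOW_KEY_* values that
 * feed classification, @mode chooses hashing vs. direct mapping, and
 * @mask/@xor/@rshift/@addend/@divisor post-process the result.  In hash
 * mode, @perturb_timer periodically re-seeds @hashrnd.
 */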
struct flow_filter {
    struct list_head    list;
    struct tcf_exts     exts;
    struct tcf_ematch_tree  ematches;
    struct tcf_proto    *tp;
    struct timer_list   perturb_timer;
    u32         perturb_period;
    u32         handle;

    u32         nkeys;
    u32         keymask;
    u32         mode;
    u32         mask;
    u32         xor;
    u32         rshift;
    u32         addend;
    u32         divisor;
    u32         baseclass;
    u32         hashrnd;
    struct rcu_work     rwork;
};

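/* Fold a kernel pointer into 32 bits; on 64-bit, XOR in the upper half. */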
static inline u32 addr_fold(void *addr)
{
    unsigned long a = (unsigned long)addr;

    return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

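/*
 * The getters below fall back to folded socket or dst_entry pointers
 * when the dissector found no usable address or port for the packet.
 */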
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
    __be32 src = flow_get_u32_src(flow);

    if (src)
        return ntohl(src);

    return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
    __be32 dst = flow_get_u32_dst(flow);

    if (dst)
        return ntohl(dst);

    return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
              const struct flow_keys *flow)
{
    return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
                  const struct flow_keys *flow)
{
    if (flow->ports.ports)
        return ntohs(flow->ports.src);

    return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
                  const struct flow_keys *flow)
{
    if (flow->ports.ports)
        return ntohs(flow->ports.dst);

    return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
    return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
    return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
    return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
    return addr_fold(skb_nfct(skb));
#else
    return 0;
#endif
}

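/*
 * CTTUPLE() evaluates to the requested conntrack tuple member.  The
 * statement expression jumps to the caller's local "fallback" label when
 * no conntrack entry is attached (or conntrack is compiled out).
 */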
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)                        \
({                                  \
    enum ip_conntrack_info ctinfo;                  \
    const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);     \
    if (ct == NULL)                         \
        goto fallback;                      \
    ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;         \
})
#else
#define CTTUPLE(skb, member)                        \
({                                  \
    goto fallback;                          \
    0;                              \
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
                 const struct flow_keys *flow)
{
    switch (skb_protocol(skb, true)) {
    case htons(ETH_P_IP):
        return ntohl(CTTUPLE(skb, src.u3.ip));
    case htons(ETH_P_IPV6):
        return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
    }
fallback:
    return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
                 const struct flow_keys *flow)
{
    switch (skb_protocol(skb, true)) {
    case htons(ETH_P_IP):
        return ntohl(CTTUPLE(skb, dst.u3.ip));
    case htons(ETH_P_IPV6):
        return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
    }
fallback:
    return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
                   const struct flow_keys *flow)
{
    return ntohs(CTTUPLE(skb, src.u.all));
fallback:
    return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
                   const struct flow_keys *flow)
{
    return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
    return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
    if (skb_dst(skb))
        return skb_dst(skb)->tclassid;
#endif
    return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
    struct sock *sk = skb_to_full_sk(skb);

    if (sk && sk->sk_socket && sk->sk_socket->file) {
        kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

        return from_kuid(&init_user_ns, skuid);
    }
    return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
    struct sock *sk = skb_to_full_sk(skb);

    if (sk && sk->sk_socket && sk->sk_socket->file) {
        kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

        return from_kgid(&init_user_ns, skgid);
    }
    return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
    u16 tag;

    if (vlan_get_tag(skb, &tag) < 0)
        return 0;
    return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
    return skb_get_hash(skb);
}

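/* Dispatch one FLOW_KEY_* id to its getter. */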
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
    switch (key) {
    case FLOW_KEY_SRC:
        return flow_get_src(skb, flow);
    case FLOW_KEY_DST:
        return flow_get_dst(skb, flow);
    case FLOW_KEY_PROTO:
        return flow_get_proto(skb, flow);
    case FLOW_KEY_PROTO_SRC:
        return flow_get_proto_src(skb, flow);
    case FLOW_KEY_PROTO_DST:
        return flow_get_proto_dst(skb, flow);
    case FLOW_KEY_IIF:
        return flow_get_iif(skb);
    case FLOW_KEY_PRIORITY:
        return flow_get_priority(skb);
    case FLOW_KEY_MARK:
        return flow_get_mark(skb);
    case FLOW_KEY_NFCT:
        return flow_get_nfct(skb);
    case FLOW_KEY_NFCT_SRC:
        return flow_get_nfct_src(skb, flow);
    case FLOW_KEY_NFCT_DST:
        return flow_get_nfct_dst(skb, flow);
    case FLOW_KEY_NFCT_PROTO_SRC:
        return flow_get_nfct_proto_src(skb, flow);
    case FLOW_KEY_NFCT_PROTO_DST:
        return flow_get_nfct_proto_dst(skb, flow);
    case FLOW_KEY_RTCLASSID:
        return flow_get_rtclassid(skb);
    case FLOW_KEY_SKUID:
        return flow_get_skuid(skb);
    case FLOW_KEY_SKGID:
        return flow_get_skgid(skb);
    case FLOW_KEY_VLAN_TAG:
        return flow_get_vlan_tag(skb);
    case FLOW_KEY_RXHASH:
        return flow_get_rxhash(skb);
    default:
        WARN_ON(1);
        return 0;
    }
}

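/* Keys whose getters consume the dissected flow_keys. */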
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |         \
              (1 << FLOW_KEY_DST) |         \
              (1 << FLOW_KEY_PROTO) |       \
              (1 << FLOW_KEY_PROTO_SRC) |       \
              (1 << FLOW_KEY_PROTO_DST) |       \
              (1 << FLOW_KEY_NFCT_SRC) |        \
              (1 << FLOW_KEY_NFCT_DST) |        \
              (1 << FLOW_KEY_NFCT_PROTO_SRC) |  \
              (1 << FLOW_KEY_NFCT_PROTO_DST))

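/*
 * Classify a packet: for each filter whose ematches match, gather the
 * configured keys (dissecting the flow only when needed), reduce them to
 * a class id either by hashing (FLOW_MODE_HASH) or by the mask/xor/
 * rshift/addend mapping, bound the result by the divisor, and resolve it
 * relative to the baseclass.
 */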
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
             struct tcf_result *res)
{
    struct flow_head *head = rcu_dereference_bh(tp->root);
    struct flow_filter *f;
    u32 keymask;
    u32 classid;
    unsigned int n, key;
    int r;

    list_for_each_entry_rcu(f, &head->filters, list) {
        u32 keys[FLOW_KEY_MAX + 1];
        struct flow_keys flow_keys;

        if (!tcf_em_tree_match(skb, &f->ematches, NULL))
            continue;

        keymask = f->keymask;
        if (keymask & FLOW_KEYS_NEEDED)
            skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

        for (n = 0; n < f->nkeys; n++) {
            key = ffs(keymask) - 1;
            keymask &= ~(1 << key);
            keys[n] = flow_key_get(skb, key, &flow_keys);
        }

        if (f->mode == FLOW_MODE_HASH)
            classid = jhash2(keys, f->nkeys, f->hashrnd);
        else {
            classid = keys[0];
            classid = (classid & f->mask) ^ f->xor;
            classid = (classid >> f->rshift) + f->addend;
        }

        if (f->divisor)
            classid %= f->divisor;

        res->class   = 0;
        res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

        r = tcf_exts_exec(skb, &f->exts, res);
        if (r < 0)
            continue;
        return r;
    }
    return -1;
}

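/* Timer callback: re-seed the jhash key and re-arm while perturbation is on. */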
static void flow_perturbation(struct timer_list *t)
{
    struct flow_filter *f = from_timer(f, t, perturb_timer);

    get_random_bytes(&f->hashrnd, 4);
    if (f->perturb_period)
        mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
    [TCA_FLOW_KEYS]     = { .type = NLA_U32 },
    [TCA_FLOW_MODE]     = { .type = NLA_U32 },
    [TCA_FLOW_BASECLASS]    = { .type = NLA_U32 },
    [TCA_FLOW_RSHIFT]   = { .type = NLA_U32 },
    [TCA_FLOW_ADDEND]   = { .type = NLA_U32 },
    [TCA_FLOW_MASK]     = { .type = NLA_U32 },
    [TCA_FLOW_XOR]      = { .type = NLA_U32 },
    [TCA_FLOW_DIVISOR]  = { .type = NLA_U32 },
    [TCA_FLOW_ACT]      = { .type = NLA_NESTED },
    [TCA_FLOW_POLICE]   = { .type = NLA_NESTED },
    [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED },
    [TCA_FLOW_PERTURB]  = { .type = NLA_U32 },
};

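/*
 * Filter teardown needs RTNL held (pending perturb timer, extension and
 * action state); frees scheduled from RCU context go through a workqueue.
 */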
static void __flow_destroy_filter(struct flow_filter *f)
{
    del_timer_sync(&f->perturb_timer);
    tcf_exts_destroy(&f->exts);
    tcf_em_tree_destroy(&f->ematches);
    tcf_exts_put_net(&f->exts);
    kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
    struct flow_filter *f = container_of(to_rcu_work(work),
                         struct flow_filter,
                         rwork);
    rtnl_lock();
    __flow_destroy_filter(f);
    rtnl_unlock();
}

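/*
 * Create a new filter or replace an existing one from netlink attributes.
 * A replacement is built completely before being swapped in via RCU; the
 * old filter is then freed after a grace period.
 */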
static int flow_change(struct net *net, struct sk_buff *in_skb,
               struct tcf_proto *tp, unsigned long base,
               u32 handle, struct nlattr **tca,
               void **arg, u32 flags,
               struct netlink_ext_ack *extack)
{
    struct flow_head *head = rtnl_dereference(tp->root);
    struct flow_filter *fold, *fnew;
    struct nlattr *opt = tca[TCA_OPTIONS];
    struct nlattr *tb[TCA_FLOW_MAX + 1];
    unsigned int nkeys = 0;
    unsigned int perturb_period = 0;
    u32 baseclass = 0;
    u32 keymask = 0;
    u32 mode;
    int err;

    if (opt == NULL)
        return -EINVAL;

    err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
                      NULL);
    if (err < 0)
        return err;

    if (tb[TCA_FLOW_BASECLASS]) {
        baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
        if (TC_H_MIN(baseclass) == 0)
            return -EINVAL;
    }

    if (tb[TCA_FLOW_KEYS]) {
        keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

        nkeys = hweight32(keymask);
        if (nkeys == 0)
            return -EINVAL;

        if (fls(keymask) - 1 > FLOW_KEY_MAX)
            return -EOPNOTSUPP;

        if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
            sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
            return -EOPNOTSUPP;
    }

    fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
    if (!fnew)
        return -ENOBUFS;

    err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
    if (err < 0)
        goto err1;

    err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
    if (err < 0)
        goto err2;

    err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
                extack);
    if (err < 0)
        goto err2;

    fold = *arg;
    if (fold) {
        err = -EINVAL;
        if (fold->handle != handle && handle)
            goto err2;

        /* Copy fold into fnew */
        fnew->tp = fold->tp;
        fnew->handle = fold->handle;
        fnew->nkeys = fold->nkeys;
        fnew->keymask = fold->keymask;
        fnew->mode = fold->mode;
        fnew->mask = fold->mask;
        fnew->xor = fold->xor;
        fnew->rshift = fold->rshift;
        fnew->addend = fold->addend;
        fnew->divisor = fold->divisor;
        fnew->baseclass = fold->baseclass;
        fnew->hashrnd = fold->hashrnd;

        mode = fold->mode;
        if (tb[TCA_FLOW_MODE])
            mode = nla_get_u32(tb[TCA_FLOW_MODE]);
        if (mode != FLOW_MODE_HASH && nkeys > 1)
            goto err2;

        if (mode == FLOW_MODE_HASH)
            perturb_period = fold->perturb_period;
        if (tb[TCA_FLOW_PERTURB]) {
            if (mode != FLOW_MODE_HASH)
                goto err2;
            perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
        }
    } else {
        err = -EINVAL;
        if (!handle)
            goto err2;
        if (!tb[TCA_FLOW_KEYS])
            goto err2;

        mode = FLOW_MODE_MAP;
        if (tb[TCA_FLOW_MODE])
            mode = nla_get_u32(tb[TCA_FLOW_MODE]);
        if (mode != FLOW_MODE_HASH && nkeys > 1)
            goto err2;

        if (tb[TCA_FLOW_PERTURB]) {
            if (mode != FLOW_MODE_HASH)
                goto err2;
            perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
        }

        if (TC_H_MAJ(baseclass) == 0) {
            struct Qdisc *q = tcf_block_q(tp->chain->block);

            baseclass = TC_H_MAKE(q->handle, baseclass);
        }
        if (TC_H_MIN(baseclass) == 0)
            baseclass = TC_H_MAKE(baseclass, 1);

        fnew->handle = handle;
        fnew->mask  = ~0U;
        fnew->tp = tp;
        get_random_bytes(&fnew->hashrnd, 4);
    }

    timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

    tcf_block_netif_keep_dst(tp->chain->block);

    if (tb[TCA_FLOW_KEYS]) {
        fnew->keymask = keymask;
        fnew->nkeys   = nkeys;
    }

    fnew->mode = mode;

    if (tb[TCA_FLOW_MASK])
        fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
    if (tb[TCA_FLOW_XOR])
        fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
    if (tb[TCA_FLOW_RSHIFT])
        fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
    if (tb[TCA_FLOW_ADDEND])
        fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

    if (tb[TCA_FLOW_DIVISOR])
        fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
    if (baseclass)
        fnew->baseclass = baseclass;

    fnew->perturb_period = perturb_period;
    if (perturb_period)
        mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

    if (!*arg)
        list_add_tail_rcu(&fnew->list, &head->filters);
    else
        list_replace_rcu(&fold->list, &fnew->list);

    *arg = fnew;

    if (fold) {
        tcf_exts_get_net(&fold->exts);
        tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
    }
    return 0;

err2:
    tcf_exts_destroy(&fnew->exts);
    tcf_em_tree_destroy(&fnew->ematches);
err1:
    kfree(fnew);
    return err;
}

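/* Unlink a filter under RTNL and defer its teardown past an RCU grace period. */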
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
               bool rtnl_held, struct netlink_ext_ack *extack)
{
    struct flow_head *head = rtnl_dereference(tp->root);
    struct flow_filter *f = arg;

    list_del_rcu(&f->list);
    tcf_exts_get_net(&f->exts);
    tcf_queue_work(&f->rwork, flow_destroy_filter_work);
    *last = list_empty(&head->filters);
    return 0;
}

static int flow_init(struct tcf_proto *tp)
{
    struct flow_head *head;

    head = kzalloc(sizeof(*head), GFP_KERNEL);
    if (head == NULL)
        return -ENOBUFS;
    INIT_LIST_HEAD(&head->filters);
    rcu_assign_pointer(tp->root, head);
    return 0;
}

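/*
 * Tear down all filters; each free is deferred through the workqueue
 * while a reference on the netns can still be taken, otherwise it is
 * done in place.
 */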
static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
             struct netlink_ext_ack *extack)
{
    struct flow_head *head = rtnl_dereference(tp->root);
    struct flow_filter *f, *next;

    list_for_each_entry_safe(f, next, &head->filters, list) {
        list_del_rcu(&f->list);
        if (tcf_exts_get_net(&f->exts))
            tcf_queue_work(&f->rwork, flow_destroy_filter_work);
        else
            __flow_destroy_filter(f);
    }
    kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
    struct flow_head *head = rtnl_dereference(tp->root);
    struct flow_filter *f;

    list_for_each_entry(f, &head->filters, list)
        if (f->handle == handle)
            return f;
    return NULL;
}

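/* Dump one filter's configuration; optional attributes are emitted only when set. */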
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
             struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
    struct flow_filter *f = fh;
    struct nlattr *nest;

    if (f == NULL)
        return skb->len;

    t->tcm_handle = f->handle;

    nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
    if (nest == NULL)
        goto nla_put_failure;

    if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
        nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
        goto nla_put_failure;

    if (f->mask != ~0 || f->xor != 0) {
        if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
            nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
            goto nla_put_failure;
    }
    if (f->rshift &&
        nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
        goto nla_put_failure;
    if (f->addend &&
        nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
        goto nla_put_failure;

    if (f->divisor &&
        nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
        goto nla_put_failure;
    if (f->baseclass &&
        nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
        goto nla_put_failure;

    if (f->perturb_period &&
        nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
        goto nla_put_failure;

    if (tcf_exts_dump(skb, &f->exts) < 0)
        goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
    if (f->ematches.hdr.nmatches &&
        tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
        goto nla_put_failure;
#endif
    nla_nest_end(skb, nest);

    if (tcf_exts_dump_stats(skb, &f->exts) < 0)
        goto nla_put_failure;

    return skb->len;

nla_put_failure:
    nla_nest_cancel(skb, nest);
    return -1;
}

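/* Walk all filters for a dump, honouring the caller's skip/count state. */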
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
              bool rtnl_held)
{
    struct flow_head *head = rtnl_dereference(tp->root);
    struct flow_filter *f;

    list_for_each_entry(f, &head->filters, list) {
        if (arg->count < arg->skip)
            goto skip;
        if (arg->fn(tp, f, arg) < 0) {
            arg->stop = 1;
            break;
        }
skip:
        arg->count++;
    }
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
    .kind       = "flow",
    .classify   = flow_classify,
    .init       = flow_init,
    .destroy    = flow_destroy,
    .change     = flow_change,
    .delete     = flow_delete,
    .get        = flow_get,
    .dump       = flow_dump,
    .walk       = flow_walk,
    .owner      = THIS_MODULE,
};

static int __init cls_flow_init(void)
{
    return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
    unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");