Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __NET_PKT_CLS_H
0003 #define __NET_PKT_CLS_H
0004 
0005 #include <linux/pkt_cls.h>
0006 #include <linux/workqueue.h>
0007 #include <net/sch_generic.h>
0008 #include <net/act_api.h>
0009 #include <net/net_namespace.h>
0010 
0011 /* TC action not accessible from user space */
0012 #define TC_ACT_CONSUMED     (TC_ACT_VALUE_MAX + 1)
0013 
0014 /* Basic packet classifier frontend definitions. */
0015 
/* Iteration state for walking all filters of a classifier; @fn is invoked
 * once per filter and may abort the walk by setting @stop.
 */
struct tcf_walker {
    int stop;       /* set non-zero (by @fn) to terminate the walk */
    int skip;       /* presumably: entries to skip before calling @fn — confirm with callers */
    int count;      /* entries visited so far */
    bool    nonempty;   /* NOTE(review): appears to flag "at least one entry seen" — confirm */
    unsigned long cookie;
    int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
0024 
0025 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
0026 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
0027 
/* Extended binding parameters for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
    enum flow_block_binder_type binder_type;
    tcf_chain_head_change_t *chain_head_change; /* callback invoked when the chain head changes */
    void *chain_head_change_priv;
    u32 block_index;    /* non-zero selects a shared block by index */
};
0034 
/* A tc filter block attached to a qdisc-internal event point; see the
 * tcf_qevent_*() API below.
 */
struct tcf_qevent {
    struct tcf_block    *block;
    struct tcf_block_ext_info info;
    struct tcf_proto __rcu *filter_chain;
};
0040 
0041 struct tcf_block_cb;
0042 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
0043 
0044 #ifdef CONFIG_NET_CLS
0045 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
0046                        u32 chain_index);
0047 void tcf_chain_put_by_act(struct tcf_chain *chain);
0048 struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
0049                      struct tcf_chain *chain);
0050 struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
0051                      struct tcf_proto *tp);
0052 void tcf_block_netif_keep_dst(struct tcf_block *block);
0053 int tcf_block_get(struct tcf_block **p_block,
0054           struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
0055           struct netlink_ext_ack *extack);
0056 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
0057               struct tcf_block_ext_info *ei,
0058               struct netlink_ext_ack *extack);
0059 void tcf_block_put(struct tcf_block *block);
0060 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
0061                struct tcf_block_ext_info *ei);
0062 
0063 static inline bool tcf_block_shared(struct tcf_block *block)
0064 {
0065     return block->index;
0066 }
0067 
0068 static inline bool tcf_block_non_null_shared(struct tcf_block *block)
0069 {
0070     return block && block->index;
0071 }
0072 
/* Return the qdisc owning @block.  Only meaningful for non-shared blocks
 * (shared blocks are not tied to a single qdisc), hence the WARN_ON().
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
    WARN_ON(tcf_block_shared(block));
    return block->q;
}
0078 
0079 int tcf_classify(struct sk_buff *skb,
0080          const struct tcf_block *block,
0081          const struct tcf_proto *tp, struct tcf_result *res,
0082          bool compat_mode);
0083 
#else
/* CONFIG_NET_CLS=n: provide no-op stubs so callers need no ifdefs. */
static inline bool tcf_block_shared(struct tcf_block *block)
{
    return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
    return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
          struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
          struct netlink_ext_ack *extack)
{
    return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
              struct tcf_block_ext_info *ei,
              struct netlink_ext_ack *extack)
{
    return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
               struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
    return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
                   void *cb_priv)
{
    return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
                  void *cb_priv)
{
}

/* Without classification support every packet is "unclassified". */
static inline int tcf_classify(struct sk_buff *skb,
                   const struct tcf_block *block,
                   const struct tcf_proto *tp,
                   struct tcf_result *res, bool compat_mode)
{
    return TC_ACT_UNSPEC;
}

#endif
0148 
/* Atomically install a new class cookie at *clp and return the old one. */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
    return xchg(clp, cl);
}
0154 
/* Bind @r to the class identified by r->classid under qdisc @q, releasing
 * any class the result was previously bound to.  Caller holds the qdisc
 * tree lock (see tcf_bind_filter()).
 */
static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
    unsigned long cl;

    cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
    cl = __cls_set_class(&r->class, cl);
    if (cl)
        q->ops->cl_ops->unbind_tcf(q, cl);
}
0165 
/* Bind filter result @r to its class under the tree lock of the qdisc
 * that owns tp's block.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
    struct Qdisc *q = tp->chain->block->q;

    /* Check q as it is not set for shared blocks. In that case,
     * setting class is not supported.
     */
    if (!q)
        return;
    sch_tree_lock(q);
    __tcf_bind_filter(q, r, base);
    sch_tree_unlock(q);
}
0180 
0181 static inline void
0182 __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
0183 {
0184     unsigned long cl;
0185 
0186     if ((cl = __cls_set_class(&r->class, 0)) != 0)
0187         q->ops->cl_ops->unbind_tcf(q, cl);
0188 }
0189 
0190 static inline void
0191 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
0192 {
0193     struct Qdisc *q = tp->chain->block->q;
0194 
0195     if (!q)
0196         return;
0197     __tcf_unbind_filter(q, r);
0198 }
0199 
/* Actions and extension attributes attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
    __u32   type; /* for backward compat(TCA_OLD_COMPAT) */
    int nr_actions;         /* populated slots in actions[] */
    struct tc_action **actions; /* TCA_ACT_MAX_PRIO slots, see tcf_exts_init() */
    struct net  *net;       /* owning netns; refcounted via tcf_exts_get_net() */
    netns_tracker   ns_tracker;
#endif
    /* Map to export classifier specific extension TLV types to the
     * generic extensions API. Unsupported extensions must be set to 0.
     */
    int action;
    int police;
};
0214 
/* Initialize @exts and, with CONFIG_NET_CLS_ACT, allocate its action
 * array (TCA_ACT_MAX_PRIO pointer slots, zeroed).  @net is only cached;
 * a reference may be taken later via tcf_exts_get_net().
 * Returns 0 on success or -ENOMEM if the array allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
                int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
    exts->type = 0;
    exts->nr_actions = 0;
    /* Note: we do not own yet a reference on net.
     * This reference might be taken later from tcf_exts_get_net().
     */
    exts->net = net;
    exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
                GFP_KERNEL);
    if (!exts->actions)
        return -ENOMEM;
#endif
    exts->action = action;
    exts->police = police;
    return 0;
}
0234 
0235 /* Return false if the netns is being destroyed in cleanup_net(). Callers
0236  * need to do cleanup synchronously in this case, otherwise may race with
0237  * tc_action_net_exit(). Return true for other cases.
0238  */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    /* maybe_get_net() yields NULL when the netns refcount already hit 0 */
    exts->net = maybe_get_net(exts->net);
    if (exts->net)
        netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
    return exts->net != NULL;
#else
    return true;
#endif
}
0250 
/* Release the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    if (exts->net)
        put_net_track(exts->net, &exts->ns_tracker);
#endif
}
0258 
#ifdef CONFIG_NET_CLS_ACT
/* Iterate the populated leading slots of exts->actions: stops at the
 * first NULL slot or after TCA_ACT_MAX_PRIO entries.
 */
#define tcf_exts_for_each_action(i, a, exts) \
    for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
/* Without CONFIG_NET_CLS_ACT there are no actions: the body never runs. */
#define tcf_exts_for_each_action(i, a, exts) \
    for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

/* Same iteration over a plain tc_action pointer array. */
#define tcf_act_for_each_action(i, a, actions) \
    for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
0269 
/* Fold hardware-reported counters into every action of @exts for which
 * tcf_action_update_hw_stats() indicates an update is wanted.
 */
static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
             u64 bytes, u64 packets, u64 drops, u64 lastuse,
             u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
    int i;

    for (i = 0; i < exts->nr_actions; i++) {
        struct tc_action *a = exts->actions[i];

        /* if stats from hw, just skip */
        if (tcf_action_update_hw_stats(a)) {
            /* stats update must not race with the softirq path */
            preempt_disable();
            tcf_action_stats_update(a, bytes, packets, drops,
                        lastuse, true);
            preempt_enable();

            a->used_hw_stats = used_hw_stats;
            a->used_hw_stats_valid = used_hw_stats_valid;
        }
    }
#endif
}
0294 
0295 /**
0296  * tcf_exts_has_actions - check if at least one action is present
0297  * @exts: tc filter extensions handle
0298  *
0299  * Returns true if at least one action is present.
0300  */
0301 static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
0302 {
0303 #ifdef CONFIG_NET_CLS_ACT
0304     return exts->nr_actions;
0305 #else
0306     return false;
0307 #endif
0308 }
0309 
0310 /**
0311  * tcf_exts_exec - execute tc filter extensions
0312  * @skb: socket buffer
0313  * @exts: tc filter extensions handle
0314  * @res: desired result
0315  *
0316  * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
0317  * a negative number if the filter must be considered unmatched or
0318  * a positive action code (TC_ACT_*) which must be returned to the
0319  * underlying layer.
0320  */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
          struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
    return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
    /* No action support compiled in: behave as a plain match. */
    return TC_ACT_OK;
}
0330 
0331 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
0332               struct nlattr **tb, struct nlattr *rate_tlv,
0333               struct tcf_exts *exts, u32 flags,
0334               struct netlink_ext_ack *extack);
0335 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
0336              struct nlattr *rate_tlv, struct tcf_exts *exts,
0337              u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
0338 void tcf_exts_destroy(struct tcf_exts *exts);
0339 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
0340 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
0341 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
0342 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
0343 
0344 /**
0345  * struct tcf_pkt_info - packet information
0346  *
0347  * @ptr: start of the pkt data
0348  * @nexthdr: offset of the next header
0349  */
0350 struct tcf_pkt_info {
0351     unsigned char *     ptr;
0352     int         nexthdr;
0353 };
0354 
0355 #ifdef CONFIG_NET_EMATCH
0356 
0357 struct tcf_ematch_ops;
0358 
0359 /**
0360  * struct tcf_ematch - extended match (ematch)
0361  * 
0362  * @matchid: identifier to allow userspace to reidentify a match
0363  * @flags: flags specifying attributes and the relation to other matches
0364  * @ops: the operations lookup table of the corresponding ematch module
0365  * @datalen: length of the ematch specific configuration data
0366  * @data: ematch specific data
0367  * @net: the network namespace
0368  */
0369 struct tcf_ematch {
0370     struct tcf_ematch_ops * ops;
0371     unsigned long       data;
0372     unsigned int        datalen;
0373     u16         matchid;
0374     u16         flags;
0375     struct net      *net;
0376 };
0377 
/* A container ematch has no ops of its own; it groups sub-matches. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
    return !em->ops;
}

/* TCF_EM_SIMPLE: match config fits directly in em->data (presumably no
 * separately allocated blob — see the flag's uapi definition).
 */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
    return em->flags & TCF_EM_SIMPLE;
}

/* TCF_EM_INVERT: the result of this match is to be negated. */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
    return em->flags & TCF_EM_INVERT;
}

/* True when no further ematch follows in the relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
    return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
0397 
0398 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
0399 {
0400     if (tcf_em_last_match(em))
0401         return 1;
0402 
0403     if (result == 0 && em->flags & TCF_EM_REL_AND)
0404         return 1;
0405 
0406     if (result != 0 && em->flags & TCF_EM_REL_OR)
0407         return 1;
0408 
0409     return 0;
0410 }
0411     
0412 /**
0413  * struct tcf_ematch_tree - ematch tree handle
0414  *
0415  * @hdr: ematch tree header supplied by userspace
0416  * @matches: array of ematches
0417  */
0418 struct tcf_ematch_tree {
0419     struct tcf_ematch_tree_hdr hdr;
0420     struct tcf_ematch * matches;
0421     
0422 };
0423 
/**
 * struct tcf_ematch_ops - ematch module operations
 * 
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
    int         kind;
    int         datalen;
    int         (*change)(struct net *net, void *,
                      int, struct tcf_ematch *);
    int         (*match)(struct sk_buff *, struct tcf_ematch *,
                     struct tcf_pkt_info *);
    void            (*destroy)(struct tcf_ematch *);
    int         (*dump)(struct sk_buff *, struct tcf_ematch *);
    struct module       *owner;
    struct list_head    link;
};
0448 
0449 int tcf_em_register(struct tcf_ematch_ops *);
0450 void tcf_em_unregister(struct tcf_ematch_ops *);
0451 int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
0452              struct tcf_ematch_tree *);
0453 void tcf_em_tree_destroy(struct tcf_ematch_tree *);
0454 int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
0455 int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
0456             struct tcf_pkt_info *);
0457 
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as-one matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
                    struct tcf_ematch_tree *tree,
                    struct tcf_pkt_info *info)
{
    if (tree->hdr.nmatches)
        return __tcf_em_tree_match(skb, tree, info);
    else
        return 1;
}
0481 
0482 #define MODULE_ALIAS_TCF_EMATCH(kind)   MODULE_ALIAS("ematch-kind-" __stringify(kind))
0483 
0484 #else /* CONFIG_NET_EMATCH */
0485 
struct tcf_ematch_tree {
};

/* CONFIG_NET_EMATCH=n: ematch operations compile out; trees always "match". */
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
0493 
0494 #endif /* CONFIG_NET_EMATCH */
0495 
0496 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
0497 {
0498     switch (layer) {
0499         case TCF_LAYER_LINK:
0500             return skb_mac_header(skb);
0501         case TCF_LAYER_NETWORK:
0502             return skb_network_header(skb);
0503         case TCF_LAYER_TRANSPORT:
0504             return skb_transport_header(skb);
0505     }
0506 
0507     return NULL;
0508 }
0509 
/* Check that [ptr, ptr + len) lies entirely within skb's linear data.
 * The final (ptr <= ptr + len) test guards against pointer wrap-around
 * for huge @len values.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
                   const unsigned char *ptr, const int len)
{
    return likely((ptr + len) <= skb_tail_pointer(skb) &&
              ptr >= skb->head &&
              (ptr <= (ptr + len)));
}
0517 
/* Resolve an "indev" netlink attribute (an interface name, bounded by
 * IFNAMSIZ) to a device ifindex in @net.  Returns the ifindex, -EINVAL
 * for an over-long name or -ENODEV when the device does not exist;
 * failures are reported via @extack.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
         struct netlink_ext_ack *extack)
{
    char indev[IFNAMSIZ];
    struct net_device *dev;

    if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
        NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
                    "Interface name too long");
        return -EINVAL;
    }
    /* __dev_get_by_name: caller is expected to hold RTNL */
    dev = __dev_get_by_name(net, indev);
    if (!dev) {
        NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
                    "Network device not found");
        return -ENODEV;
    }
    return dev->ifindex;
}
0538 
0539 static inline bool
0540 tcf_match_indev(struct sk_buff *skb, int ifindex)
0541 {
0542     if (!ifindex)
0543         return true;
0544     if  (!skb->skb_iif)
0545         return false;
0546     return ifindex == skb->skb_iif;
0547 }
0548 
0549 int tc_setup_offload_action(struct flow_action *flow_action,
0550                 const struct tcf_exts *exts,
0551                 struct netlink_ext_ack *extack);
0552 void tc_cleanup_offload_action(struct flow_action *flow_action);
0553 int tc_setup_action(struct flow_action *flow_action,
0554             struct tc_action *actions[],
0555             struct netlink_ext_ack *extack);
0556 
0557 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
0558              void *type_data, bool err_stop, bool rtnl_held);
0559 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
0560             enum tc_setup_type type, void *type_data, bool err_stop,
0561             u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
0562 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
0563             enum tc_setup_type type, void *type_data, bool err_stop,
0564             u32 *old_flags, unsigned int *old_in_hw_count,
0565             u32 *new_flags, unsigned int *new_in_hw_count,
0566             bool rtnl_held);
0567 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
0568             enum tc_setup_type type, void *type_data, bool err_stop,
0569             u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
0570 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
0571               bool add, flow_setup_cb_t *cb,
0572               enum tc_setup_type type, void *type_data,
0573               void *cb_priv, u32 *flags, unsigned int *in_hw_count);
0574 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
0575 
0576 #ifdef CONFIG_NET_CLS_ACT
0577 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
0578             enum flow_block_binder_type binder_type,
0579             struct nlattr *block_index_attr,
0580             struct netlink_ext_ack *extack);
0581 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
0582 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
0583                    struct netlink_ext_ack *extack);
0584 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
0585                   struct sk_buff **to_free, int *ret);
0586 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
/* CONFIG_NET_CLS_ACT=n: qevents compile out to no-ops. */
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
                  enum flow_block_binder_type binder_type,
                  struct nlattr *block_index_attr,
                  struct netlink_ext_ack *extack)
{
    return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
                         struct netlink_ext_ack *extack)
{
    return 0;
}

/* Pass the packet through unchanged. */
static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
          struct sk_buff **to_free, int *ret)
{
    return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
    return 0;
}
#endif
0618 
/* u32 classifier hardware offload: key-node parameters. */
struct tc_cls_u32_knode {
    struct tcf_exts *exts;
    struct tcf_result *res;
    struct tc_u32_sel *sel;
    u32 handle;
    u32 val;
    u32 mask;
    u32 link_handle;
    u8 fshift;
};

/* u32 classifier hardware offload: hash-node parameters. */
struct tc_cls_u32_hnode {
    u32 handle;
    u32 prio;
    unsigned int divisor;
};

enum tc_clsu32_command {
    TC_CLSU32_NEW_KNODE,
    TC_CLSU32_REPLACE_KNODE,
    TC_CLSU32_DELETE_KNODE,
    TC_CLSU32_NEW_HNODE,
    TC_CLSU32_REPLACE_HNODE,
    TC_CLSU32_DELETE_HNODE,
};

/* Request passed to drivers via ndo_setup_tc for cls_u32 offload. */
struct tc_cls_u32_offload {
    struct flow_cls_common_offload common;
    /* knode values */
    enum tc_clsu32_command command;
    union {
        struct tc_cls_u32_knode knode;
        struct tc_cls_u32_hnode hnode;
    };
};
0654 
0655 static inline bool tc_can_offload(const struct net_device *dev)
0656 {
0657     return dev->features & NETIF_F_HW_TC;
0658 }
0659 
0660 static inline bool tc_can_offload_extack(const struct net_device *dev,
0661                      struct netlink_ext_ack *extack)
0662 {
0663     bool can = tc_can_offload(dev);
0664 
0665     if (!can)
0666         NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
0667 
0668     return can;
0669 }
0670 
/* Common precondition check for drivers that only offload chain 0: the
 * device must advertise TC offload and the request must target chain 0;
 * otherwise an extack message is set and false is returned.
 */
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
                  struct flow_cls_common_offload *common)
{
    if (!tc_can_offload_extack(dev, common->extack))
        return false;
    if (common->chain_index) {
        NL_SET_ERR_MSG(common->extack,
                   "Driver supports only offload of chain 0");
        return false;
    }
    return true;
}
0684 
0685 static inline bool tc_skip_hw(u32 flags)
0686 {
0687     return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
0688 }
0689 
0690 static inline bool tc_skip_sw(u32 flags)
0691 {
0692     return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
0693 }
0694 
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
    /* reject any flag bits outside the supported set */
    if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
              TCA_CLS_FLAGS_VERBOSE))
        return false;

    /* the XOR is zero only when both SKIP_HW and SKIP_SW are set */
    flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
    if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
        return false;

    return true;
}
0708 
0709 static inline bool tc_in_hw(u32 flags)
0710 {
0711     return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
0712 }
0713 
/* Fill the common part of a classifier offload request from @tp.  The
 * extack pointer is only propagated when errors matter to the user
 * (skip_sw) or verbose reporting was requested.
 */
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
               const struct tcf_proto *tp, u32 flags,
               struct netlink_ext_ack *extack)
{
    cls_common->chain_index = tp->chain->index;
    cls_common->protocol = tp->protocol;
    cls_common->prio = tp->prio >> 16;
    if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
        cls_common->extack = extack;
}
0725 
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Attach a zero-initialized TC skb extension to @skb; returns NULL when
 * the extension cannot be allocated.
 */
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
    struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

    if (tc_skb_ext)
        memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
    return tc_skb_ext;
}
#endif
0736 
/* matchall classifier offload */
enum tc_matchall_command {
    TC_CLSMATCHALL_REPLACE,
    TC_CLSMATCHALL_DESTROY,
    TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
    struct flow_cls_common_offload common;
    enum tc_matchall_command command;
    struct flow_rule *rule;
    struct flow_stats stats;
    unsigned long cookie;
};

/* cls_bpf offload */
enum tc_clsbpf_command {
    TC_CLSBPF_OFFLOAD,
    TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
    struct flow_cls_common_offload common;
    enum tc_clsbpf_command command;
    struct tcf_exts *exts;
    struct bpf_prog *prog;
    struct bpf_prog *oldprog;
    const char *name;
    bool exts_integrated;
};

/* mqprio qdisc offload */
struct tc_mqprio_qopt_offload {
    /* struct tc_mqprio_qopt must always be the first element */
    struct tc_mqprio_qopt qopt;
    u16 mode;
    u16 shaper;
    u32 flags;
    u64 min_rate[TC_QOPT_MAX_QUEUE];
    u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
    u8  *data;
    u32 len;
    struct rcu_head rcu;    /* enables RCU-deferred freeing */
};
0784 
/* Common destination for qdisc offload stats dumps. */
struct tc_qopt_offload_stats {
    struct gnet_stats_basic_sync *bstats;
    struct gnet_stats_queue *qstats;
};

/* mq qdisc offload */
enum tc_mq_command {
    TC_MQ_CREATE,
    TC_MQ_DESTROY,
    TC_MQ_STATS,
    TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
    unsigned long queue;
    u32 child_handle;
};

struct tc_mq_qopt_offload {
    enum tc_mq_command command;
    u32 handle;
    union {
        struct tc_qopt_offload_stats stats;
        struct tc_mq_opt_offload_graft_params graft_params;
    };
};

/* HTB qdisc offload */
enum tc_htb_command {
    /* Root */
    TC_HTB_CREATE, /* Initialize HTB offload. */
    TC_HTB_DESTROY, /* Destroy HTB offload. */

    /* Classes */
    /* Allocate qid and create leaf. */
    TC_HTB_LEAF_ALLOC_QUEUE,
    /* Convert leaf to inner, preserve and return qid, create new leaf. */
    TC_HTB_LEAF_TO_INNER,
    /* Delete leaf, while siblings remain. */
    TC_HTB_LEAF_DEL,
    /* Delete leaf, convert parent to leaf, preserving qid. */
    TC_HTB_LEAF_DEL_LAST,
    /* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
    TC_HTB_LEAF_DEL_LAST_FORCE,
    /* Modify parameters of a node. */
    TC_HTB_NODE_MODIFY,

    /* Class qdisc */
    TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
    struct netlink_ext_ack *extack;
    enum tc_htb_command command;
    u32 parent_classid;
    u16 classid;
    u16 qid;
    u64 rate;
    u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX
0845 
/* RED qdisc offload */
enum tc_red_command {
    TC_RED_REPLACE,
    TC_RED_DESTROY,
    TC_RED_STATS,
    TC_RED_XSTATS,
    TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
    u32 min;
    u32 max;
    u32 probability;
    u32 limit;
    bool is_ecn;
    bool is_harddrop;
    bool is_nodrop;
    struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
    enum tc_red_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_red_qopt_offload_params set;
        struct tc_qopt_offload_stats stats;
        struct red_stats *xstats;
        u32 child_handle;
    };
};

/* GRED qdisc offload */
enum tc_gred_command {
    TC_GRED_REPLACE,
    TC_GRED_DESTROY,
    TC_GRED_STATS,
};

/* Per-virtual-queue GRED parameters; @present marks configured DPs. */
struct tc_gred_vq_qopt_offload_params {
    bool present;
    u32 limit;
    u32 prio;
    u32 min;
    u32 max;
    bool is_ecn;
    bool is_harddrop;
    u32 probability;
    /* Only need backlog, see struct tc_prio_qopt_offload_params */
    u32 *backlog;
};

struct tc_gred_qopt_offload_params {
    bool grio_on;
    bool wred_on;
    unsigned int dp_cnt;
    unsigned int dp_def;
    struct gnet_stats_queue *qstats;
    struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
    struct gnet_stats_basic_sync bstats[MAX_DPs];
    struct gnet_stats_queue qstats[MAX_DPs];
    struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
    enum tc_gred_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_gred_qopt_offload_params set;
        struct tc_gred_qopt_offload_stats stats;
    };
};
0920 
/* prio qdisc offload */
enum tc_prio_command {
    TC_PRIO_REPLACE,
    TC_PRIO_DESTROY,
    TC_PRIO_STATS,
    TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
    int bands;
    u8 priomap[TC_PRIO_MAX + 1];
    /* At the point of un-offloading the Qdisc, the reported backlog and
     * qlen need to be reduced by the portion that is in HW.
     */
    struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
    u8 band;
    u32 child_handle;
};

struct tc_prio_qopt_offload {
    enum tc_prio_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_prio_qopt_offload_params replace_params;
        struct tc_qopt_offload_stats stats;
        struct tc_prio_qopt_offload_graft_params graft_params;
    };
};

/* root qdisc graft notification */
enum tc_root_command {
    TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
    enum tc_root_command command;
    u32 handle;
    bool ingress;
};

/* ETS qdisc offload */
enum tc_ets_command {
    TC_ETS_REPLACE,
    TC_ETS_DESTROY,
    TC_ETS_STATS,
    TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
    unsigned int bands;
    u8 priomap[TC_PRIO_MAX + 1];
    unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
    unsigned int weights[TCQ_ETS_MAX_BANDS];
    struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
    u8 band;
    u32 child_handle;
};

struct tc_ets_qopt_offload {
    enum tc_ets_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_ets_qopt_offload_replace_params replace_params;
        struct tc_qopt_offload_stats stats;
        struct tc_ets_qopt_offload_graft_params graft_params;
    };
};
0993 
/* TBF qdisc offload */
enum tc_tbf_command {
    TC_TBF_REPLACE,
    TC_TBF_DESTROY,
    TC_TBF_STATS,
    TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
    struct psched_ratecfg rate;
    u32 max_size;
    struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
    enum tc_tbf_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_tbf_qopt_offload_replace_params replace_params;
        struct tc_qopt_offload_stats stats;
        u32 child_handle;
    };
};

/* FIFO qdisc offload */
enum tc_fifo_command {
    TC_FIFO_REPLACE,
    TC_FIFO_DESTROY,
    TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
    enum tc_fifo_command command;
    u32 handle;
    u32 parent;
    union {
        struct tc_qopt_offload_stats stats;
    };
};
1032 
#ifdef CONFIG_NET_CLS_ACT
/* Static key gating TC skb-extension handling; toggled by the helpers. */
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif
1043 
1044 #endif