/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_ACT_API_H
#define __NET_ACT_API_H

/*
 * Public action API for classifiers/qdiscs
 */

#include <linux/refcount.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct tcf_idrinfo {
    struct mutex    lock;
    struct idr  action_idr;
    struct net  *net;
};

struct tc_action_ops;

struct tc_action {
    const struct tc_action_ops  *ops;
    __u32               type; /* for backward compat (TCA_OLD_COMPAT) */
    struct tcf_idrinfo      *idrinfo;

    u32             tcfa_index;
    refcount_t          tcfa_refcnt;
    atomic_t            tcfa_bindcnt;
    int             tcfa_action;
    struct tcf_t            tcfa_tm;
    struct gnet_stats_basic_sync    tcfa_bstats;
    struct gnet_stats_basic_sync    tcfa_bstats_hw;
    struct gnet_stats_queue     tcfa_qstats;
    struct net_rate_estimator __rcu *tcfa_rate_est;
    spinlock_t          tcfa_lock;
    struct gnet_stats_basic_sync __percpu *cpu_bstats;
    struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
    struct gnet_stats_queue __percpu *cpu_qstats;
    struct tc_cookie    __rcu *act_cookie;
    struct tcf_chain    __rcu *goto_chain;
    u32         tcfa_flags;
    u8          hw_stats;
    u8          used_hw_stats;
    bool            used_hw_stats_valid;
    u32         in_hw_count;
};
#define tcf_index   common.tcfa_index
#define tcf_refcnt  common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
#define tcf_action  common.tcfa_action
#define tcf_tm      common.tcfa_tm
#define tcf_bstats  common.tcfa_bstats
#define tcf_qstats  common.tcfa_qstats
#define tcf_rate_est    common.tcfa_rate_est
#define tcf_lock    common.tcfa_lock
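
/*
 * The tcf_* shorthands above expand through a "common" member, so they are
 * meant for per-action private structs that embed struct tc_action as their
 * first field under that name. A minimal sketch (struct tcf_foo is a
 * hypothetical action, modeled on the real ones in net/sched/act_*.c):
 *
 *      struct tcf_foo {
 *              struct tc_action        common;
 *              int                     foo_param;
 *      };
 *
 *      // p->tcf_index  expands to p->common.tcfa_index
 *      // p->tcf_action expands to p->common.tcfa_action
 */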

#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \
                  TCA_ACT_HW_STATS_DELAYED)

/* Reserve 16 bits for user-space. See TCA_ACT_FLAGS_NO_PERCPU_STATS. */
#define TCA_ACT_FLAGS_USER_BITS 16
#define TCA_ACT_FLAGS_USER_MASK 0xffff
#define TCA_ACT_FLAGS_POLICE    (1U << TCA_ACT_FLAGS_USER_BITS)
#define TCA_ACT_FLAGS_BIND  (1U << (TCA_ACT_FLAGS_USER_BITS + 1))
#define TCA_ACT_FLAGS_REPLACE   (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
#define TCA_ACT_FLAGS_NO_RTNL   (1U << (TCA_ACT_FLAGS_USER_BITS + 3))

/* Update lastuse only if needed, to avoid dirtying a cache line.
 * We use a temp variable to avoid fetching jiffies twice.
 */
static inline void tcf_lastuse_update(struct tcf_t *tm)
{
    unsigned long now = jiffies;

    if (tm->lastuse != now)
        tm->lastuse = now;
    if (unlikely(!tm->firstuse))
        tm->firstuse = now;
}

static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
    dtm->install = jiffies_to_clock_t(jiffies - stm->install);
    dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
    dtm->firstuse = stm->firstuse ?
        jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
    dtm->expires = jiffies_to_clock_t(stm->expires);
}
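
/*
 * tcf_tm_dump() converts the jiffies-based timestamps kept in the action
 * into the clock_t deltas expected by user space. A hedged sketch of its
 * use in an action's ->dump() callback (TCA_FOO_TM and TCA_FOO_PAD are
 * hypothetical netlink attributes of a "foo" action):
 *
 *      struct tcf_t t;
 *
 *      tcf_tm_dump(&t, &p->tcf_tm);
 *      if (nla_put_64bit(skb, TCA_FOO_TM, sizeof(t), &t, TCA_FOO_PAD))
 *              goto nla_put_failure;
 */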

static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
    if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
        return FLOW_ACTION_HW_STATS_DONT_CARE;
    else if (!hw_stats)
        return FLOW_ACTION_HW_STATS_DISABLED;

    return hw_stats;
}

#ifdef CONFIG_NET_CLS_ACT

#define ACT_P_CREATED 1
#define ACT_P_DELETED 1

typedef void (*tc_action_priv_destructor)(void *priv);

struct tc_action_ops {
    struct list_head head;
    char    kind[IFNAMSIZ];
    enum tca_id  id; /* identifier should match kind */
    size_t  size;
    struct module       *owner;
    int     (*act)(struct sk_buff *, const struct tc_action *,
               struct tcf_result *); /* called under RCU BH lock */
    int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
    void    (*cleanup)(struct tc_action *);
    int     (*lookup)(struct net *net, struct tc_action **a, u32 index);
    int     (*init)(struct net *net, struct nlattr *nla,
            struct nlattr *est, struct tc_action **act,
            struct tcf_proto *tp,
            u32 flags, struct netlink_ext_ack *extack);
    int     (*walk)(struct net *, struct sk_buff *,
            struct netlink_callback *, int,
            const struct tc_action_ops *,
            struct netlink_ext_ack *);
    void    (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool);
    size_t  (*get_fill_size)(const struct tc_action *act);
    struct net_device *(*get_dev)(const struct tc_action *a,
                      tc_action_priv_destructor *destructor);
    struct psample_group *
    (*get_psample_group)(const struct tc_action *a,
                 tc_action_priv_destructor *destructor);
    int     (*offload_act_setup)(struct tc_action *act, void *entry_data,
                     u32 *index_inc, bool bind,
                     struct netlink_ext_ack *extack);
};
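
/*
 * A minimal sketch of an action module's ops table. The "foo" identifiers
 * are hypothetical placeholders (real examples live in net/sched/act_*.c);
 * only .kind, .id, .owner, .size and the callbacks a given action actually
 * needs have to be filled in.
 *
 *      static struct tc_action_ops act_foo_ops = {
 *              .kind           = "foo",
 *              .id             = TCA_ID_FOO,           // hypothetical tca_id
 *              .owner          = THIS_MODULE,
 *              .size           = sizeof(struct tcf_foo),
 *              .act            = tcf_foo_act,          // fast path, RCU BH
 *              .init           = tcf_foo_init,         // parse netlink, create/bind
 *              .dump           = tcf_foo_dump,
 *              .cleanup        = tcf_foo_release,
 *              .walk           = tcf_foo_walker,
 *              .lookup         = tcf_foo_search,
 *      };
 */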

struct tc_action_net {
    struct tcf_idrinfo *idrinfo;
    const struct tc_action_ops *ops;
};

static inline
int tc_action_net_init(struct net *net, struct tc_action_net *tn,
               const struct tc_action_ops *ops)
{
    int err = 0;

    tn->idrinfo = kmalloc(sizeof(*tn->idrinfo), GFP_KERNEL);
    if (!tn->idrinfo)
        return -ENOMEM;
    tn->ops = ops;
    tn->idrinfo->net = net;
    mutex_init(&tn->idrinfo->lock);
    idr_init(&tn->idrinfo->action_idr);
    return err;
}

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
             struct tcf_idrinfo *idrinfo);

static inline void tc_action_net_exit(struct list_head *net_list,
                      unsigned int id)
{
    struct net *net;

    rtnl_lock();
    list_for_each_entry(net, net_list, exit_list) {
        struct tc_action_net *tn = net_generic(net, id);

        tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
        kfree(tn->idrinfo);
    }
    rtnl_unlock();
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
               struct netlink_callback *cb, int type,
               const struct tc_action_ops *ops,
               struct netlink_ext_ack *extack);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
           struct tc_action **a, const struct tc_action_ops *ops,
           int bind, bool cpustats, u32 flags);
int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
                  struct nlattr *est, struct tc_action **a,
                  const struct tc_action_ops *ops, int bind,
                  u32 flags);
void tcf_idr_insert_many(struct tc_action *actions[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
            struct tc_action **a, int bind);
int tcf_idr_release(struct tc_action *a, bool bind);
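
/*
 * Typical use of the idr helpers from an action's ->init() callback.
 * A hedged sketch; act_foo_ops is hypothetical, but the control flow mirrors
 * the existing net/sched/act_*.c modules:
 *
 *      err = tcf_idr_check_alloc(tn, &index, a, bind);
 *      if (err < 0)
 *              return err;
 *      if (!err) {                     // index was free: create a new action
 *              ret = tcf_idr_create_from_flags(tn, index, est, a,
 *                                              &act_foo_ops, bind, flags);
 *              if (ret) {
 *                      tcf_idr_cleanup(tn, index);
 *                      return ret;
 *              }
 *              ret = ACT_P_CREATED;
 *      } else {                        // existing action found and referenced
 *              if (bind)
 *                      return 0;
 *              if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
 *                      tcf_idr_release(*a, bind);
 *                      return -EEXIST;
 *              }
 *      }
 */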

int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
              struct pernet_operations *ops);
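
/*
 * Registration is per-module and per-netns: tcf_register_action() takes both
 * the ops table and a pernet_operations whose init/exit callbacks are usually
 * thin wrappers around tc_action_net_init() and tc_action_net_exit(). A
 * sketch with hypothetical "foo" names:
 *
 *      static unsigned int foo_net_id;
 *
 *      static __net_init int foo_init_net(struct net *net)
 *      {
 *              struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *              return tc_action_net_init(net, tn, &act_foo_ops);
 *      }
 *
 *      static void __net_exit foo_exit_net(struct list_head *net_list)
 *      {
 *              tc_action_net_exit(net_list, foo_net_id);
 *      }
 *
 *      static struct pernet_operations foo_net_ops = {
 *              .init           = foo_init_net,
 *              .exit_batch     = foo_exit_net,
 *              .id             = &foo_net_id,
 *              .size           = sizeof(struct tc_action_net),
 *      };
 *
 *      static int __init foo_init_module(void)
 *      {
 *              return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *      }
 */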
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
            int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
            struct nlattr *est,
            struct tc_action *actions[], int init_res[], size_t *attr_size,
            u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
                     bool rtnl_held,
                     struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                    struct nlattr *nla, struct nlattr *est,
                    struct tc_action_ops *a_o, int *init_res,
                    u32 flags, struct netlink_ext_ack *extack);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
            int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);

static inline void tcf_action_update_bstats(struct tc_action *a,
                        struct sk_buff *skb)
{
    if (likely(a->cpu_bstats)) {
        bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
        return;
    }
    spin_lock(&a->tcfa_lock);
    bstats_update(&a->tcfa_bstats, skb);
    spin_unlock(&a->tcfa_lock);
}

static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
{
    if (likely(a->cpu_qstats)) {
        qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
        return;
    }
    spin_lock(&a->tcfa_lock);
    qstats_drop_inc(&a->tcfa_qstats);
    spin_unlock(&a->tcfa_lock);
}

static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
{
    if (likely(a->cpu_qstats)) {
        qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
        return;
    }
    spin_lock(&a->tcfa_lock);
    qstats_overlimit_inc(&a->tcfa_qstats);
    spin_unlock(&a->tcfa_lock);
}
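
/*
 * The three helpers above use the per-CPU counters when the action was
 * created with cpustats and fall back to the spinlock-protected common
 * counters otherwise. A hedged sketch of their use in an ->act() fast path
 * (tcf_foo_act, struct tcf_foo and to_foo() are hypothetical):
 *
 *      static int tcf_foo_act(struct sk_buff *skb, const struct tc_action *a,
 *                             struct tcf_result *res)
 *      {
 *              struct tcf_foo *p = to_foo(a);
 *              int action = READ_ONCE(p->tcf_action);
 *
 *              tcf_lastuse_update(&p->tcf_tm);
 *              tcf_action_update_bstats(&p->common, skb);
 *              if (action == TC_ACT_SHOT)
 *                      tcf_action_inc_drop_qstats(&p->common);
 *              return action;
 *      }
 */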

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
                 u64 drops, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);

int tcf_action_update_hw_stats(struct tc_action *action);
int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
                void *cb_priv, bool add);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
                 struct tcf_chain **handle,
                 struct netlink_ext_ack *newchain);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
                     struct tcf_chain *newchain);

#ifdef CONFIG_INET
DECLARE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#else /* !CONFIG_NET_CLS_ACT */

static inline int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
                      void *cb_priv, bool add)
{
    return 0;
}

#endif /* CONFIG_NET_CLS_ACT */

static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
                       u64 packets, u64 drops,
                       u64 lastuse, bool hw)
{
#ifdef CONFIG_NET_CLS_ACT
    if (!a->ops->stats_update)
        return;

    a->ops->stats_update(a, bytes, packets, drops, lastuse, hw);
#endif
}

#endif