/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only one bit is needed here: RUNNING is manipulated with
	 * non-atomic bitops, under the protection of the qdisc spinlock.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)
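
/* A lockless (TCQ_F_NOLOCK) qdisc cannot derive emptiness from q.qlen
 * alone: it is treated as non-empty while a missed run request
 * (QDISC_STATE_MISSED) or an in-progress drain (QDISC_STATE_DRAINING)
 * is pending; see nolock_qdisc_is_empty() below.
 */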

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are
				      * for q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * re-trying a flushed skb on its queue
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * tree backlog updates stop here
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/* For performance sake on SMP, we put highly modified fields at the
	 * end, in their own cache lines.
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;

	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */
static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}

/* NOLOCK qdiscs track the running state via ->seqlock; locked qdiscs use
 * the RUNNING bit in ->state2 instead.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdiscs, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has a store-release semantic; the
		 * unlock-then-test_bit() sequence needs store-load ordering,
		 * hence a full memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
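
/* Illustrative sketch (not kernel code): the dequeue loop is bracketed by
 * qdisc_run_begin()/qdisc_run_end() so that only one CPU runs a given
 * qdisc at a time; my_xmit_one() is a hypothetical transmit helper.  The
 * real loop lives in __qdisc_run() in net/sched/sch_generic.c.
 *
 *	if (qdisc_run_begin(q)) {
 *		while ((skb = q->dequeue(q)) != NULL)
 *			my_xmit_one(skb);
 *		qdisc_run_end(q);
 *	}
 */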

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
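
/* Illustrative sketch: bulk dequeue keeps pulling packets while the BQL
 * byte budget of the TX queue lasts, roughly what try_bulk_dequeue_skb()
 * does in net/sched/sch_generic.c:
 *
 *	int budget = qdisc_avail_bulklimit(txq) - skb->len;
 *
 *	while (budget > 0 && (nskb = q->dequeue(q)) != NULL) {
 *		budget -= nskb->len;
 *		... chain nskb behind skb for one xmit_more batch ...
 *	}
 */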

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void *			(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, u32,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;

	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* number of offloaded filters */
	unsigned int nooffloaddevcnt; /* number of devs unable to do offload */
	unsigned int lockeddevcnt; /* number of devs that require rtnl lock */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain) \
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp) \
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
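
/* Illustrative sketch: a qdisc stashing private per-packet state in the
 * qdisc_skb_cb private area validates the size once per accessor; struct
 * my_skb_cb is hypothetical, struct netem_skb_cb in net/sched/sch_netem.c
 * is a real user of this pattern.
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */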

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
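
/* Illustrative sketch: classful qdiscs embed struct Qdisc_class_common in
 * their per-class state and resolve ->find() via qdisc_class_find() plus
 * container_of(); struct my_class and my_sched_data are hypothetical, HTB
 * and DRR follow this pattern.
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		u32 quantum;
 *	};
 *
 *	static unsigned long my_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *c;
 *
 *		c = qdisc_class_find(&q->clhash, classid);
 *		return c ? (unsigned long)container_of(c, struct my_class,
 *						       common) : 0UL;
 *	}
 */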

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
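
/* Illustrative sketch: a classful qdisc enqueues into its child with
 * qdisc_enqueue() and must interpret the verdict; only real drops are
 * counted against this qdisc (see net_xmit_drop_count() above).  This is
 * roughly the TBF/prio pattern, with q->qdisc being the child:
 *
 *	ret = qdisc_enqueue(skb, q->qdisc, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */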

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
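
/* Illustrative sketch: with the helpers above, a minimal FIFO enqueue is
 * just a limit check (this mirrors net/sched/sch_fifo.c), paired with
 * .dequeue = qdisc_dequeue_head; qdisc_drop() is defined further down.
 *
 *	static int my_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */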

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
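
/* Illustrative sketch: a shaper whose dequeue can defer packets (e.g. a
 * token bucket that is out of tokens) peeks at its child first and only
 * consumes the packet once it may actually be sent; tokens_available() is
 * hypothetical, sch_tbf.c implements the real variant.
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb && tokens_available(q, skb))
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 */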

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/* The backlog in bytes of this list is not known here; it is up
	 * to the caller to update the backlog accounting.  Freed skbs are
	 * batched via rtnl_kfree_skbs(), hence the RTNL assertion.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}
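
/* Illustrative sketch: the ->graft() op of a classful qdisc boils down to
 * qdisc_replace(), which swaps the child under the tree lock and purges
 * the old child's queue; struct my_sched_data is hypothetical, this is
 * roughly the sch_tbf ->graft():
 *
 *	static int my_graft(struct Qdisc *sch, unsigned long arg,
 *			    struct Qdisc *new, struct Qdisc **old,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		if (!new)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */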

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T): look up the transmission time of a packet in a
 * qdisc rate table, given the packet length.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);
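
/* Illustrative sketch: precompute once at configuration time, then convert
 * packet sizes to nanoseconds on the fast path.  At 125000000 bytes/s
 * (1 Gbit/s), a 1500 byte packet costs 1500 * 8 = 12000 ns:
 *
 *	struct psched_ratecfg r;
 *
 *	psched_ratecfg_precompute(&r, &qopt->rate, rate64);
 *	next_tx_ns = now_ns + psched_l2t_ns(&r, qdisc_pkt_len(skb));
 */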

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using 64bit rates should report them via dedicated
	 * 64bit attributes instead.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress qdisc.
 * It keeps a replica of the fields relevant to the clsact/ingress
 * classification fast path, so that path does not need to touch the
 * full struct Qdisc.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);
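
/* Illustrative sketch: the ingress/clsact qdiscs keep a mini_Qdisc_pair in
 * their private data, point the device's miniq pointer at it at init time,
 * and later flip the active replica RCU-safely whenever the filter chain
 * head changes (see net/sched/sch_ingress.c):
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *
 * and from the chain-head-change callback:
 *
 *	mini_qdisc_pair_swap(&q->miniqp, tp_head);
 */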

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif