0001 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
0002 #ifndef __LINUX_PKT_SCHED_H
0003 #define __LINUX_PKT_SCHED_H
0004 
0005 #include <linux/types.h>
0006 
0007 /* Logical priority bands not depending on specific packet scheduler.
0008    Every scheduler will map them to real traffic classes, if it has
0009    no more precise mechanism to classify packets.
0010 
0011    These numbers have no special meaning, though their coincidence
0012    with obsolete IPv6 values is not accidental :-). New IPv6 drafts
0013    preferred full anarchy inspired by the diffserv group.
0014 
0015    Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
0016    class; actually, as a rule it will be handled with more care than
0017    filler or even bulk.
0018  */
0019 
0020 #define TC_PRIO_BESTEFFORT      0
0021 #define TC_PRIO_FILLER          1
0022 #define TC_PRIO_BULK            2
0023 #define TC_PRIO_INTERACTIVE_BULK    4
0024 #define TC_PRIO_INTERACTIVE     6
0025 #define TC_PRIO_CONTROL         7
0026 
0027 #define TC_PRIO_MAX         15
0028 
0029 /* Generic queue statistics, available for all the elements.
0030    Particular schedulers may have also their private records.
0031  */
0032 
0033 struct tc_stats {
0034     __u64   bytes;          /* Number of enqueued bytes */
0035     __u32   packets;        /* Number of enqueued packets   */
0036     __u32   drops;          /* Packets dropped because of lack of resources */
0037     __u32   overlimits;     /* Number of throttle events when this
0038                      * flow goes out of allocated bandwidth */
0039     __u32   bps;            /* Current flow byte rate */
0040     __u32   pps;            /* Current flow packet rate */
0041     __u32   qlen;
0042     __u32   backlog;
0043 };
0044 
0045 struct tc_estimator {
0046     signed char interval;
0047     unsigned char   ewma_log;
0048 };
0049 
0050 /* "Handles"
0051    ---------
0052 
0053     All the traffic control objects have 32bit identifiers, or "handles".
0054 
0055     They can be considered as opaque numbers from the user API viewpoint,
0056     but they actually always consist of two fields: a major and a
0057     minor number, which the kernel interprets specially. Applications
0058     may rely on this split, though it is not recommended.
0059 
0060     E.g. qdisc handles always have a minor number equal to zero;
0061     classes (or flows) have a major equal to the parent qdisc's major,
0062     and a minor uniquely identifying the class inside the qdisc.
0063 
0064     Macros to manipulate handles:
0065  */
0066 
0067 #define TC_H_MAJ_MASK (0xFFFF0000U)
0068 #define TC_H_MIN_MASK (0x0000FFFFU)
0069 #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
0070 #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
0071 #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
0072 
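/*
 * Illustrative sketch (not part of this header): composing and splitting a
 * handle with the macros above.  The value built below corresponds to
 * "1:10" in tc's major:minor hex notation; the helper name is made up.
 */
static inline void tc_handle_example(void)
{
	__u32 h = TC_H_MAKE(0x1U << 16, 0x10U);	/* major 1, minor 0x10 */
	__u32 major = TC_H_MAJ(h) >> 16;	/* 1 */
	__u32 minor = TC_H_MIN(h);		/* 0x10 */

	(void)major; (void)minor;
}
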
0073 #define TC_H_UNSPEC (0U)
0074 #define TC_H_ROOT   (0xFFFFFFFFU)
0075 #define TC_H_INGRESS    (0xFFFFFFF1U)
0076 #define TC_H_CLSACT TC_H_INGRESS
0077 
0078 #define TC_H_MIN_PRIORITY   0xFFE0U
0079 #define TC_H_MIN_INGRESS    0xFFF2U
0080 #define TC_H_MIN_EGRESS     0xFFF3U
0081 
0082 /* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
0083 enum tc_link_layer {
0084     TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
0085     TC_LINKLAYER_ETHERNET,
0086     TC_LINKLAYER_ATM,
0087 };
0088 #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
0089 
0090 struct tc_ratespec {
0091     unsigned char   cell_log;
0092     __u8        linklayer; /* lower 4 bits */
0093     unsigned short  overhead;
0094     short       cell_align;
0095     unsigned short  mpu;
0096     __u32       rate;
0097 };
0098 
0099 #define TC_RTAB_SIZE    1024
0100 
0101 struct tc_sizespec {
0102     unsigned char   cell_log;
0103     unsigned char   size_log;
0104     short       cell_align;
0105     int     overhead;
0106     unsigned int    linklayer;
0107     unsigned int    mpu;
0108     unsigned int    mtu;
0109     unsigned int    tsize;
0110 };
0111 
0112 enum {
0113     TCA_STAB_UNSPEC,
0114     TCA_STAB_BASE,
0115     TCA_STAB_DATA,
0116     __TCA_STAB_MAX
0117 };
0118 
0119 #define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
0120 
0121 /* FIFO section */
0122 
0123 struct tc_fifo_qopt {
0124     __u32   limit;  /* Queue length: bytes for bfifo, packets for pfifo */
0125 };
0126 
0127 /* SKBPRIO section */
0128 
0129 /*
0130  * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
0131  * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map the
0132  * DS field of IPv4 and IPv6 headers one to one.
0133  * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
0134  */
0135 
0136 #define SKBPRIO_MAX_PRIORITY 64
0137 
0138 struct tc_skbprio_qopt {
0139     __u32   limit;      /* Queue length in packets. */
0140 };
0141 
0142 /* PRIO section */
0143 
0144 #define TCQ_PRIO_BANDS  16
0145 #define TCQ_MIN_PRIO_BANDS 2
0146 
0147 struct tc_prio_qopt {
0148     int bands;          /* Number of bands */
0149     __u8    priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
0150 };
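
/*
 * Illustrative sketch (not part of this header): a 3-band prio configuration
 * whose priomap mirrors the classic pfifo_fast mapping of the TC_PRIO_*
 * values above (interactive/control -> band 0, best effort -> band 1,
 * bulk/filler -> band 2).
 */
static const struct tc_prio_qopt tc_prio_example = {
	.bands   = 3,
	.priomap = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
};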
0151 
0152 /* MULTIQ section */
0153 
0154 struct tc_multiq_qopt {
0155     __u16   bands;          /* Number of bands */
0156     __u16   max_bands;      /* Maximum number of queues */
0157 };
0158 
0159 /* PLUG section */
0160 
0161 #define TCQ_PLUG_BUFFER                0
0162 #define TCQ_PLUG_RELEASE_ONE           1
0163 #define TCQ_PLUG_RELEASE_INDEFINITE    2
0164 #define TCQ_PLUG_LIMIT                 3
0165 
0166 struct tc_plug_qopt {
0167     /* TCQ_PLUG_BUFFER: Insert a plug into the queue and
0168      *  buffer any incoming packets
0169      * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
0170      *   to beginning of the next plug.
0171      * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
0172      *   Stop buffering packets until the next TCQ_PLUG_BUFFER
0173      *   command is received (just act as a pass-thru queue).
0174      * TCQ_PLUG_LIMIT: Increase/decrease queue size
0175      */
0176     int             action;
0177     __u32           limit;
0178 };
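
/*
 * Illustrative sketch (not part of this header): the control messages a user
 * would send to a plug qdisc -- first start buffering, later release
 * everything queued so far.  Variable names are made up.
 */
static const struct tc_plug_qopt tc_plug_start_buffering = {
	.action = TCQ_PLUG_BUFFER,
};
static const struct tc_plug_qopt tc_plug_release_all = {
	.action = TCQ_PLUG_RELEASE_INDEFINITE,
};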
0179 
0180 /* TBF section */
0181 
0182 struct tc_tbf_qopt {
0183     struct tc_ratespec rate;
0184     struct tc_ratespec peakrate;
0185     __u32       limit;
0186     __u32       buffer;
0187     __u32       mtu;
0188 };
0189 
0190 enum {
0191     TCA_TBF_UNSPEC,
0192     TCA_TBF_PARMS,
0193     TCA_TBF_RTAB,
0194     TCA_TBF_PTAB,
0195     TCA_TBF_RATE64,
0196     TCA_TBF_PRATE64,
0197     TCA_TBF_BURST,
0198     TCA_TBF_PBURST,
0199     TCA_TBF_PAD,
0200     __TCA_TBF_MAX,
0201 };
0202 
0203 #define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
0204 
0205 
0206 /* TEQL section */
0207 
0208 /* TEQL does not require any parameters */
0209 
0210 /* SFQ section */
0211 
0212 struct tc_sfq_qopt {
0213     unsigned    quantum;    /* Bytes per round allocated to flow */
0214     int     perturb_period; /* Period of hash perturbation */
0215     __u32       limit;      /* Maximal packets in queue */
0216     unsigned    divisor;    /* Hash divisor  */
0217     unsigned    flows;      /* Maximal number of flows  */
0218 };
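
/*
 * Illustrative sketch (not part of this header): an SFQ configuration in the
 * spirit of the traditional defaults -- a quantum of roughly one full-size
 * Ethernet frame, hash perturbation every 10 seconds, a 1024-bucket hash and
 * at most 128 flows (the exact defaults are left to iproute2).
 */
static const struct tc_sfq_qopt tc_sfq_example = {
	.quantum        = 1514,	/* bytes per round, about one MTU */
	.perturb_period = 10,	/* seconds between hash perturbations */
	.limit          = 127,	/* max packets queued overall */
	.divisor        = 1024,	/* hash buckets */
	.flows          = 128,	/* max concurrent flows */
};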
0219 
0220 struct tc_sfqred_stats {
0221     __u32           prob_drop;      /* Early drops, below max threshold */
0222     __u32           forced_drop;    /* Early drops, after max threshold */
0223     __u32           prob_mark;      /* Marked packets, below max threshold */
0224     __u32           forced_mark;    /* Marked packets, after max threshold */
0225     __u32           prob_mark_head; /* Marked packets, below max threshold */
0226     __u32           forced_mark_head; /* Marked packets, after max threshold */
0227 };
0228 
0229 struct tc_sfq_qopt_v1 {
0230     struct tc_sfq_qopt v0;
0231     unsigned int    depth;      /* max number of packets per flow */
0232     unsigned int    headdrop;
0233 /* SFQRED parameters */
0234     __u32       limit;      /* HARD maximal flow queue length (bytes) */
0235     __u32       qth_min;    /* Min average length threshold (bytes) */
0236     __u32       qth_max;    /* Max average length threshold (bytes) */
0237     unsigned char   Wlog;       /* log(W)       */
0238     unsigned char   Plog;       /* log(P_max/(qth_max-qth_min)) */
0239     unsigned char   Scell_log;  /* cell size for idle damping */
0240     unsigned char   flags;
0241     __u32       max_P;      /* probability, high resolution */
0242 /* SFQRED stats */
0243     struct tc_sfqred_stats stats;
0244 };
0245 
0246 
0247 struct tc_sfq_xstats {
0248     __s32       allot;
0249 };
0250 
0251 /* RED section */
0252 
0253 enum {
0254     TCA_RED_UNSPEC,
0255     TCA_RED_PARMS,
0256     TCA_RED_STAB,
0257     TCA_RED_MAX_P,
0258     __TCA_RED_MAX,
0259 };
0260 
0261 #define TCA_RED_MAX (__TCA_RED_MAX - 1)
0262 
0263 struct tc_red_qopt {
0264     __u32       limit;      /* HARD maximal queue length (bytes)    */
0265     __u32       qth_min;    /* Min average length threshold (bytes) */
0266     __u32       qth_max;    /* Max average length threshold (bytes) */
0267     unsigned char   Wlog;       /* log(W)       */
0268     unsigned char   Plog;       /* log(P_max/(qth_max-qth_min)) */
0269     unsigned char   Scell_log;  /* cell size for idle damping */
0270     unsigned char   flags;
0271 #define TC_RED_ECN      1
0272 #define TC_RED_HARDDROP     2
0273 #define TC_RED_ADAPTATIVE   4
0274 };
0275 
0276 struct tc_red_xstats {
0277     __u32           early;          /* Early drops */
0278     __u32           pdrop;          /* Drops due to queue limits */
0279     __u32           other;          /* Drops due to drop() calls */
0280     __u32           marked;         /* Marked packets */
0281 };
0282 
0283 /* GRED section */
0284 
0285 #define MAX_DPs 16
0286 
0287 enum {
0288        TCA_GRED_UNSPEC,
0289        TCA_GRED_PARMS,
0290        TCA_GRED_STAB,
0291        TCA_GRED_DPS,
0292        TCA_GRED_MAX_P,
0293        TCA_GRED_LIMIT,
0294        TCA_GRED_VQ_LIST,    /* nested TCA_GRED_VQ_ENTRY */
0295        __TCA_GRED_MAX,
0296 };
0297 
0298 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
0299 
0300 enum {
0301     TCA_GRED_VQ_ENTRY_UNSPEC,
0302     TCA_GRED_VQ_ENTRY,  /* nested TCA_GRED_VQ_* */
0303     __TCA_GRED_VQ_ENTRY_MAX,
0304 };
0305 #define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
0306 
0307 enum {
0308     TCA_GRED_VQ_UNSPEC,
0309     TCA_GRED_VQ_PAD,
0310     TCA_GRED_VQ_DP,         /* u32 */
0311     TCA_GRED_VQ_STAT_BYTES,     /* u64 */
0312     TCA_GRED_VQ_STAT_PACKETS,   /* u32 */
0313     TCA_GRED_VQ_STAT_BACKLOG,   /* u32 */
0314     TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
0315     TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
0316     TCA_GRED_VQ_STAT_FORCED_DROP,   /* u32 */
0317     TCA_GRED_VQ_STAT_FORCED_MARK,   /* u32 */
0318     TCA_GRED_VQ_STAT_PDROP,     /* u32 */
0319     TCA_GRED_VQ_STAT_OTHER,     /* u32 */
0320     TCA_GRED_VQ_FLAGS,      /* u32 */
0321     __TCA_GRED_VQ_MAX
0322 };
0323 
0324 #define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
0325 
0326 struct tc_gred_qopt {
0327     __u32       limit;        /* HARD maximal queue length (bytes)    */
0328     __u32       qth_min;      /* Min average length threshold (bytes) */
0329     __u32       qth_max;      /* Max average length threshold (bytes) */
0330     __u32       DP;           /* up to 2^32 DPs */
0331     __u32       backlog;
0332     __u32       qave;
0333     __u32       forced;
0334     __u32       early;
0335     __u32       other;
0336     __u32       pdrop;
0337     __u8        Wlog;         /* log(W)               */
0338     __u8        Plog;         /* log(P_max/(qth_max-qth_min)) */
0339     __u8        Scell_log;    /* cell size for idle damping */
0340     __u8        prio;         /* prio of this VQ */
0341     __u32       packets;
0342     __u32       bytesin;
0343 };
0344 
0345 /* gred setup */
0346 struct tc_gred_sopt {
0347     __u32       DPs;
0348     __u32       def_DP;
0349     __u8        grio;
0350     __u8        flags;
0351     __u16       pad1;
0352 };
0353 
0354 /* CHOKe section */
0355 
0356 enum {
0357     TCA_CHOKE_UNSPEC,
0358     TCA_CHOKE_PARMS,
0359     TCA_CHOKE_STAB,
0360     TCA_CHOKE_MAX_P,
0361     __TCA_CHOKE_MAX,
0362 };
0363 
0364 #define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
0365 
0366 struct tc_choke_qopt {
0367     __u32       limit;      /* Hard queue length (packets)  */
0368     __u32       qth_min;    /* Min average threshold (packets) */
0369     __u32       qth_max;    /* Max average threshold (packets) */
0370     unsigned char   Wlog;       /* log(W)       */
0371     unsigned char   Plog;       /* log(P_max/(qth_max-qth_min)) */
0372     unsigned char   Scell_log;  /* cell size for idle damping */
0373     unsigned char   flags;      /* see RED flags */
0374 };
0375 
0376 struct tc_choke_xstats {
0377     __u32       early;          /* Early drops */
0378     __u32       pdrop;          /* Drops due to queue limits */
0379     __u32       other;          /* Drops due to drop() calls */
0380     __u32       marked;         /* Marked packets */
0381     __u32       matched;    /* Drops due to flow match */
0382 };
0383 
0384 /* HTB section */
0385 #define TC_HTB_NUMPRIO      8
0386 #define TC_HTB_MAXDEPTH     8
0387 #define TC_HTB_PROTOVER     3 /* the same as HTB and TC's major */
0388 
0389 struct tc_htb_opt {
0390     struct tc_ratespec  rate;
0391     struct tc_ratespec  ceil;
0392     __u32   buffer;
0393     __u32   cbuffer;
0394     __u32   quantum;
0395     __u32   level;      /* out only */
0396     __u32   prio;
0397 };
0398 struct tc_htb_glob {
0399     __u32 version;      /* to match HTB/TC */
0400     __u32 rate2quantum; /* bps->quantum divisor */
0401     __u32 defcls;       /* default class number */
0402     __u32 debug;        /* debug flags */
0403 
0404     /* stats */
0405     __u32 direct_pkts; /* count of non shaped packets */
0406 };
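
/*
 * Illustrative sketch (not part of this header): global HTB parameters as
 * they would be passed in TCA_HTB_INIT -- protocol version TC_HTB_PROTOVER,
 * tc's customary rate-to-quantum divisor of 10, and unclassified traffic
 * sent to the default class (the defcls value is hypothetical).
 */
static const struct tc_htb_glob tc_htb_glob_example = {
	.version      = TC_HTB_PROTOVER,
	.rate2quantum = 10,	/* quantum = rate / rate2quantum */
	.defcls       = 0x30,	/* minor number of the default class */
};
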
0407 enum {
0408     TCA_HTB_UNSPEC,
0409     TCA_HTB_PARMS,
0410     TCA_HTB_INIT,
0411     TCA_HTB_CTAB,
0412     TCA_HTB_RTAB,
0413     TCA_HTB_DIRECT_QLEN,
0414     TCA_HTB_RATE64,
0415     TCA_HTB_CEIL64,
0416     TCA_HTB_PAD,
0417     TCA_HTB_OFFLOAD,
0418     __TCA_HTB_MAX,
0419 };
0420 
0421 #define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
0422 
0423 struct tc_htb_xstats {
0424     __u32 lends;
0425     __u32 borrows;
0426     __u32 giants;   /* unused since 'Make HTB scheduler work with TSO.' */
0427     __s32 tokens;
0428     __s32 ctokens;
0429 };
0430 
0431 /* HFSC section */
0432 
0433 struct tc_hfsc_qopt {
0434     __u16   defcls;     /* default class */
0435 };
0436 
0437 struct tc_service_curve {
0438     __u32   m1;     /* slope of the first segment in bps */
0439     __u32   d;      /* x-projection of the first segment in us */
0440     __u32   m2;     /* slope of the second segment in bps */
0441 };
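
/*
 * Illustrative sketch (not part of this header): a two-piece HFSC service
 * curve -- a steeper slope m1 for the first d microseconds, then slope m2 --
 * and a purely linear curve expressed with m1 = d = 0.  The slope values are
 * placeholders in whatever unit m1/m2 carry (see the comments above).
 */
static const struct tc_service_curve hfsc_sc_two_piece = {
	.m1 = 2500000,	/* initial slope */
	.d  = 10000,	/* the first segment lasts 10000 us */
	.m2 = 1250000,	/* long-term slope */
};
static const struct tc_service_curve hfsc_sc_linear = {
	.m1 = 0,
	.d  = 0,
	.m2 = 1250000,	/* constant long-term slope */
};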
0442 
0443 struct tc_hfsc_stats {
0444     __u64   work;       /* total work done */
0445     __u64   rtwork;     /* work done by real-time criteria */
0446     __u32   period;     /* current period */
0447     __u32   level;      /* class level in hierarchy */
0448 };
0449 
0450 enum {
0451     TCA_HFSC_UNSPEC,
0452     TCA_HFSC_RSC,
0453     TCA_HFSC_FSC,
0454     TCA_HFSC_USC,
0455     __TCA_HFSC_MAX,
0456 };
0457 
0458 #define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
0459 
0460 
0461 /* CBQ section */
0462 
0463 #define TC_CBQ_MAXPRIO      8
0464 #define TC_CBQ_MAXLEVEL     8
0465 #define TC_CBQ_DEF_EWMA     5
0466 
0467 struct tc_cbq_lssopt {
0468     unsigned char   change;
0469     unsigned char   flags;
0470 #define TCF_CBQ_LSS_BOUNDED 1
0471 #define TCF_CBQ_LSS_ISOLATED    2
0472     unsigned char   ewma_log;
0473     unsigned char   level;
0474 #define TCF_CBQ_LSS_FLAGS   1
0475 #define TCF_CBQ_LSS_EWMA    2
0476 #define TCF_CBQ_LSS_MAXIDLE 4
0477 #define TCF_CBQ_LSS_MINIDLE 8
0478 #define TCF_CBQ_LSS_OFFTIME 0x10
0479 #define TCF_CBQ_LSS_AVPKT   0x20
0480     __u32       maxidle;
0481     __u32       minidle;
0482     __u32       offtime;
0483     __u32       avpkt;
0484 };
0485 
0486 struct tc_cbq_wrropt {
0487     unsigned char   flags;
0488     unsigned char   priority;
0489     unsigned char   cpriority;
0490     unsigned char   __reserved;
0491     __u32       allot;
0492     __u32       weight;
0493 };
0494 
0495 struct tc_cbq_ovl {
0496     unsigned char   strategy;
0497 #define TC_CBQ_OVL_CLASSIC  0
0498 #define TC_CBQ_OVL_DELAY    1
0499 #define TC_CBQ_OVL_LOWPRIO  2
0500 #define TC_CBQ_OVL_DROP     3
0501 #define TC_CBQ_OVL_RCLASSIC 4
0502     unsigned char   priority2;
0503     __u16       pad;
0504     __u32       penalty;
0505 };
0506 
0507 struct tc_cbq_police {
0508     unsigned char   police;
0509     unsigned char   __res1;
0510     unsigned short  __res2;
0511 };
0512 
0513 struct tc_cbq_fopt {
0514     __u32       split;
0515     __u32       defmap;
0516     __u32       defchange;
0517 };
0518 
0519 struct tc_cbq_xstats {
0520     __u32       borrows;
0521     __u32       overactions;
0522     __s32       avgidle;
0523     __s32       undertime;
0524 };
0525 
0526 enum {
0527     TCA_CBQ_UNSPEC,
0528     TCA_CBQ_LSSOPT,
0529     TCA_CBQ_WRROPT,
0530     TCA_CBQ_FOPT,
0531     TCA_CBQ_OVL_STRATEGY,
0532     TCA_CBQ_RATE,
0533     TCA_CBQ_RTAB,
0534     TCA_CBQ_POLICE,
0535     __TCA_CBQ_MAX,
0536 };
0537 
0538 #define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
0539 
0540 /* dsmark section */
0541 
0542 enum {
0543     TCA_DSMARK_UNSPEC,
0544     TCA_DSMARK_INDICES,
0545     TCA_DSMARK_DEFAULT_INDEX,
0546     TCA_DSMARK_SET_TC_INDEX,
0547     TCA_DSMARK_MASK,
0548     TCA_DSMARK_VALUE,
0549     __TCA_DSMARK_MAX,
0550 };
0551 
0552 #define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
0553 
0554 /* ATM  section */
0555 
0556 enum {
0557     TCA_ATM_UNSPEC,
0558     TCA_ATM_FD,     /* file/socket descriptor */
0559     TCA_ATM_PTR,        /* pointer to descriptor - later */
0560     TCA_ATM_HDR,        /* LL header */
0561     TCA_ATM_EXCESS,     /* excess traffic class (0 for CLP)  */
0562     TCA_ATM_ADDR,       /* PVC address (for output only) */
0563     TCA_ATM_STATE,      /* VC state (ATM_VS_*; for output only) */
0564     __TCA_ATM_MAX,
0565 };
0566 
0567 #define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
0568 
0569 /* Network emulator */
0570 
0571 enum {
0572     TCA_NETEM_UNSPEC,
0573     TCA_NETEM_CORR,
0574     TCA_NETEM_DELAY_DIST,
0575     TCA_NETEM_REORDER,
0576     TCA_NETEM_CORRUPT,
0577     TCA_NETEM_LOSS,
0578     TCA_NETEM_RATE,
0579     TCA_NETEM_ECN,
0580     TCA_NETEM_RATE64,
0581     TCA_NETEM_PAD,
0582     TCA_NETEM_LATENCY64,
0583     TCA_NETEM_JITTER64,
0584     TCA_NETEM_SLOT,
0585     TCA_NETEM_SLOT_DIST,
0586     __TCA_NETEM_MAX,
0587 };
0588 
0589 #define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
0590 
0591 struct tc_netem_qopt {
0592     __u32   latency;    /* added delay (us) */
0593     __u32   limit;      /* fifo limit (packets) */
0594     __u32   loss;       /* random packet loss (0=none ~0=100%) */
0595     __u32   gap;        /* re-ordering gap (0 for none) */
0596     __u32   duplicate;  /* random packet dup  (0=none ~0=100%) */
0597     __u32   jitter;     /* random jitter in latency (us) */
0598 };
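
/*
 * Illustrative sketch (not part of this header): the loss and duplicate
 * fields are scaled probabilities where 0 means never and ~0 (0xFFFFFFFF)
 * means always, so a percentage maps to roughly pct/100 of the full 32-bit
 * range; a 1% loss therefore encodes as about 42949672.  Helper name is
 * made up.
 */
static inline __u32 netem_percent_to_prob(unsigned int pct)
{
	if (pct >= 100)
		return ~0U;
	return (__u32)(((__u64)pct * 0xFFFFFFFFULL) / 100);
}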
0599 
0600 struct tc_netem_corr {
0601     __u32   delay_corr; /* delay correlation */
0602     __u32   loss_corr;  /* packet loss correlation */
0603     __u32   dup_corr;   /* duplicate correlation  */
0604 };
0605 
0606 struct tc_netem_reorder {
0607     __u32   probability;
0608     __u32   correlation;
0609 };
0610 
0611 struct tc_netem_corrupt {
0612     __u32   probability;
0613     __u32   correlation;
0614 };
0615 
0616 struct tc_netem_rate {
0617     __u32   rate;   /* byte/s */
0618     __s32   packet_overhead;
0619     __u32   cell_size;
0620     __s32   cell_overhead;
0621 };
0622 
0623 struct tc_netem_slot {
0624     __s64   min_delay; /* nsec */
0625     __s64   max_delay;
0626     __s32   max_packets;
0627     __s32   max_bytes;
0628     __s64   dist_delay; /* nsec */
0629     __s64   dist_jitter; /* nsec */
0630 };
0631 
0632 enum {
0633     NETEM_LOSS_UNSPEC,
0634     NETEM_LOSS_GI,      /* General Intuitive - 4 state model */
0635     NETEM_LOSS_GE,      /* Gilbert Elliot models */
0636     __NETEM_LOSS_MAX
0637 };
0638 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
0639 
0640 /* State transition probabilities for 4 state model */
0641 struct tc_netem_gimodel {
0642     __u32   p13;
0643     __u32   p31;
0644     __u32   p32;
0645     __u32   p14;
0646     __u32   p23;
0647 };
0648 
0649 /* Gilbert-Elliot models */
0650 struct tc_netem_gemodel {
0651     __u32 p;
0652     __u32 r;
0653     __u32 h;
0654     __u32 k1;
0655 };
0656 
0657 #define NETEM_DIST_SCALE    8192
0658 #define NETEM_DIST_MAX      16384
0659 
0660 /* DRR */
0661 
0662 enum {
0663     TCA_DRR_UNSPEC,
0664     TCA_DRR_QUANTUM,
0665     __TCA_DRR_MAX
0666 };
0667 
0668 #define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
0669 
0670 struct tc_drr_stats {
0671     __u32   deficit;
0672 };
0673 
0674 /* MQPRIO */
0675 #define TC_QOPT_BITMASK 15
0676 #define TC_QOPT_MAX_QUEUE 16
0677 
0678 enum {
0679     TC_MQPRIO_HW_OFFLOAD_NONE,  /* no offload requested */
0680     TC_MQPRIO_HW_OFFLOAD_TCS,   /* offload TCs, no queue counts */
0681     __TC_MQPRIO_HW_OFFLOAD_MAX
0682 };
0683 
0684 #define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
0685 
0686 enum {
0687     TC_MQPRIO_MODE_DCB,
0688     TC_MQPRIO_MODE_CHANNEL,
0689     __TC_MQPRIO_MODE_MAX
0690 };
0691 
0692 #define TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
0693 
0694 enum {
0695     TC_MQPRIO_SHAPER_DCB,
0696     TC_MQPRIO_SHAPER_BW_RATE,   /* Add new shapers below */
0697     __TC_MQPRIO_SHAPER_MAX
0698 };
0699 
0700 #define TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
0701 
0702 struct tc_mqprio_qopt {
0703     __u8    num_tc;
0704     __u8    prio_tc_map[TC_QOPT_BITMASK + 1];
0705     __u8    hw;
0706     __u16   count[TC_QOPT_MAX_QUEUE];
0707     __u16   offset[TC_QOPT_MAX_QUEUE];
0708 };
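
/*
 * Illustrative sketch (not part of this header): an mqprio mapping with two
 * traffic classes over 8 TX queues -- priorities 0-3 to TC 0 (queues 0-3)
 * and priorities 4-7 to TC 1 (queues 4-7), with no hardware offload
 * requested (the queue layout is hypothetical).
 */
static const struct tc_mqprio_qopt mqprio_example = {
	.num_tc      = 2,
	.prio_tc_map = { 0, 0, 0, 0, 1, 1, 1, 1,
			 0, 0, 0, 0, 0, 0, 0, 0 },
	.hw          = TC_MQPRIO_HW_OFFLOAD_NONE,
	.count       = { 4, 4 },	/* queues per TC */
	.offset      = { 0, 4 },	/* first queue of each TC */
};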
0709 
0710 #define TC_MQPRIO_F_MODE        0x1
0711 #define TC_MQPRIO_F_SHAPER      0x2
0712 #define TC_MQPRIO_F_MIN_RATE        0x4
0713 #define TC_MQPRIO_F_MAX_RATE        0x8
0714 
0715 enum {
0716     TCA_MQPRIO_UNSPEC,
0717     TCA_MQPRIO_MODE,
0718     TCA_MQPRIO_SHAPER,
0719     TCA_MQPRIO_MIN_RATE64,
0720     TCA_MQPRIO_MAX_RATE64,
0721     __TCA_MQPRIO_MAX,
0722 };
0723 
0724 #define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
0725 
0726 /* SFB */
0727 
0728 enum {
0729     TCA_SFB_UNSPEC,
0730     TCA_SFB_PARMS,
0731     __TCA_SFB_MAX,
0732 };
0733 
0734 #define TCA_SFB_MAX (__TCA_SFB_MAX - 1)
0735 
0736 /*
0737  * Note: increment, decrement are Q0.16 fixed-point values.
0738  */
0739 struct tc_sfb_qopt {
0740     __u32 rehash_interval;  /* delay between hash move, in ms */
0741     __u32 warmup_time;  /* double buffering warmup time in ms (warmup_time < rehash_interval) */
0742     __u32 max;      /* max len of qlen_min */
0743     __u32 bin_size;     /* maximum queue length per bin */
0744     __u32 increment;    /* probability increment, (d1 in Blue) */
0745     __u32 decrement;    /* probability decrement, (d2 in Blue) */
0746     __u32 limit;        /* max SFB queue length */
0747     __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
0748     __u32 penalty_burst;
0749 };
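
/*
 * Illustrative sketch (not part of this header): increment and decrement are
 * Q0.16 fixed-point probabilities, so a real-valued probability p in [0, 1)
 * is encoded as p * 65536 (e.g. a step of 0.0005 encodes as 32).  Helper
 * name is made up.
 */
static inline __u32 sfb_prob_to_q0_16(double p)
{
	return (__u32)(p * 65536.0);
}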
0750 
0751 struct tc_sfb_xstats {
0752     __u32 earlydrop;
0753     __u32 penaltydrop;
0754     __u32 bucketdrop;
0755     __u32 queuedrop;
0756     __u32 childdrop; /* drops in child qdisc */
0757     __u32 marked;
0758     __u32 maxqlen;
0759     __u32 maxprob;
0760     __u32 avgprob;
0761 };
0762 
0763 #define SFB_MAX_PROB 0xFFFF
0764 
0765 /* QFQ */
0766 enum {
0767     TCA_QFQ_UNSPEC,
0768     TCA_QFQ_WEIGHT,
0769     TCA_QFQ_LMAX,
0770     __TCA_QFQ_MAX
0771 };
0772 
0773 #define TCA_QFQ_MAX (__TCA_QFQ_MAX - 1)
0774 
0775 struct tc_qfq_stats {
0776     __u32 weight;
0777     __u32 lmax;
0778 };
0779 
0780 /* CODEL */
0781 
0782 enum {
0783     TCA_CODEL_UNSPEC,
0784     TCA_CODEL_TARGET,
0785     TCA_CODEL_LIMIT,
0786     TCA_CODEL_INTERVAL,
0787     TCA_CODEL_ECN,
0788     TCA_CODEL_CE_THRESHOLD,
0789     __TCA_CODEL_MAX
0790 };
0791 
0792 #define TCA_CODEL_MAX   (__TCA_CODEL_MAX - 1)
0793 
0794 struct tc_codel_xstats {
0795     __u32   maxpacket; /* largest packet we've seen so far */
0796     __u32   count;     /* how many drops we've done since the last time we
0797                 * entered dropping state
0798                 */
0799     __u32   lastcount; /* count at entry to dropping state */
0800     __u32   ldelay;    /* in-queue delay seen by most recently dequeued packet */
0801     __s32   drop_next; /* time to drop next packet */
0802     __u32   drop_overlimit; /* number of times the max qdisc packet limit was hit */
0803     __u32   ecn_mark;  /* number of packets we ECN marked instead of dropped */
0804     __u32   dropping;  /* are we in dropping state ? */
0805     __u32   ce_mark;   /* number of CE marked packets because of ce_threshold */
0806 };
0807 
0808 /* FQ_CODEL */
0809 
0810 enum {
0811     TCA_FQ_CODEL_UNSPEC,
0812     TCA_FQ_CODEL_TARGET,
0813     TCA_FQ_CODEL_LIMIT,
0814     TCA_FQ_CODEL_INTERVAL,
0815     TCA_FQ_CODEL_ECN,
0816     TCA_FQ_CODEL_FLOWS,
0817     TCA_FQ_CODEL_QUANTUM,
0818     TCA_FQ_CODEL_CE_THRESHOLD,
0819     TCA_FQ_CODEL_DROP_BATCH_SIZE,
0820     TCA_FQ_CODEL_MEMORY_LIMIT,
0821     __TCA_FQ_CODEL_MAX
0822 };
0823 
0824 #define TCA_FQ_CODEL_MAX    (__TCA_FQ_CODEL_MAX - 1)
0825 
0826 enum {
0827     TCA_FQ_CODEL_XSTATS_QDISC,
0828     TCA_FQ_CODEL_XSTATS_CLASS,
0829 };
0830 
0831 struct tc_fq_codel_qd_stats {
0832     __u32   maxpacket;  /* largest packet we've seen so far */
0833     __u32   drop_overlimit; /* number of times the max qdisc
0834                  * packet limit was hit
0835                  */
0836     __u32   ecn_mark;   /* number of packets we ECN marked
0837                  * instead of being dropped
0838                  */
0839     __u32   new_flow_count; /* number of times packets
0840                  * created a 'new flow'
0841                  */
0842     __u32   new_flows_len;  /* count of flows in new list */
0843     __u32   old_flows_len;  /* count of flows in old list */
0844     __u32   ce_mark;    /* packets above ce_threshold */
0845     __u32   memory_usage;   /* in bytes */
0846     __u32   drop_overmemory;
0847 };
0848 
0849 struct tc_fq_codel_cl_stats {
0850     __s32   deficit;
0851     __u32   ldelay;     /* in-queue delay seen by most recently
0852                  * dequeued packet
0853                  */
0854     __u32   count;
0855     __u32   lastcount;
0856     __u32   dropping;
0857     __s32   drop_next;
0858 };
0859 
0860 struct tc_fq_codel_xstats {
0861     __u32   type;
0862     union {
0863         struct tc_fq_codel_qd_stats qdisc_stats;
0864         struct tc_fq_codel_cl_stats class_stats;
0865     };
0866 };
0867 
0868 /* FQ */
0869 
0870 enum {
0871     TCA_FQ_UNSPEC,
0872 
0873     TCA_FQ_PLIMIT,      /* limit of total number of packets in queue */
0874 
0875     TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
0876 
0877     TCA_FQ_QUANTUM,     /* RR quantum */
0878 
0879     TCA_FQ_INITIAL_QUANTUM,     /* RR quantum for new flow */
0880 
0881     TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
0882 
0883     TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
0884 
0885     TCA_FQ_FLOW_MAX_RATE,   /* per flow max rate */
0886 
0887     TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
0888 
0889     TCA_FQ_FLOW_REFILL_DELAY,   /* flow credit refill delay in usec */
0890 
0891     TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
0892 
0893     TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
0894 
0895     TCA_FQ_CE_THRESHOLD,    /* DCTCP-like CE-marking threshold */
0896 
0897     __TCA_FQ_MAX
0898 };
0899 
0900 #define TCA_FQ_MAX  (__TCA_FQ_MAX - 1)
0901 
0902 struct tc_fq_qd_stats {
0903     __u64   gc_flows;
0904     __u64   highprio_packets;
0905     __u64   tcp_retrans;
0906     __u64   throttled;
0907     __u64   flows_plimit;
0908     __u64   pkts_too_long;
0909     __u64   allocation_errors;
0910     __s64   time_next_delayed_flow;
0911     __u32   flows;
0912     __u32   inactive_flows;
0913     __u32   throttled_flows;
0914     __u32   unthrottle_latency_ns;
0915     __u64   ce_mark;        /* packets above ce_threshold */
0916 };
0917 
0918 /* Heavy-Hitter Filter */
0919 
0920 enum {
0921     TCA_HHF_UNSPEC,
0922     TCA_HHF_BACKLOG_LIMIT,
0923     TCA_HHF_QUANTUM,
0924     TCA_HHF_HH_FLOWS_LIMIT,
0925     TCA_HHF_RESET_TIMEOUT,
0926     TCA_HHF_ADMIT_BYTES,
0927     TCA_HHF_EVICT_TIMEOUT,
0928     TCA_HHF_NON_HH_WEIGHT,
0929     __TCA_HHF_MAX
0930 };
0931 
0932 #define TCA_HHF_MAX (__TCA_HHF_MAX - 1)
0933 
0934 struct tc_hhf_xstats {
0935     __u32   drop_overlimit; /* number of times max qdisc packet limit
0936                  * was hit
0937                  */
0938     __u32   hh_overlimit;   /* number of times the heavy-hitter limit was hit */
0939     __u32   hh_tot_count;   /* number of captured heavy-hitters so far */
0940     __u32   hh_cur_count;   /* number of current heavy-hitters */
0941 };
0942 
0943 /* PIE */
0944 enum {
0945     TCA_PIE_UNSPEC,
0946     TCA_PIE_TARGET,
0947     TCA_PIE_LIMIT,
0948     TCA_PIE_TUPDATE,
0949     TCA_PIE_ALPHA,
0950     TCA_PIE_BETA,
0951     TCA_PIE_ECN,
0952     TCA_PIE_BYTEMODE,
0953     __TCA_PIE_MAX
0954 };
0955 #define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)
0956 
0957 struct tc_pie_xstats {
0958     __u32 prob;             /* current probability */
0959     __u32 delay;            /* current delay in ms */
0960     __u32 avg_dq_rate;      /* current average dq_rate in bits/pie_time */
0961     __u32 packets_in;       /* total number of packets enqueued */
0962     __u32 dropped;          /* packets dropped due to pie_action */
0963     __u32 overlimit;        /* dropped due to lack of space in queue */
0964     __u32 maxq;             /* maximum queue size */
0965     __u32 ecn_mark;         /* packets marked with ECN */
0966 };
0967 
0968 /* CBS */
0969 struct tc_cbs_qopt {
0970     __u8 offload;
0971     __u8 _pad[3];
0972     __s32 hicredit;
0973     __s32 locredit;
0974     __s32 idleslope;
0975     __s32 sendslope;
0976 };
0977 
0978 enum {
0979     TCA_CBS_UNSPEC,
0980     TCA_CBS_PARMS,
0981     __TCA_CBS_MAX,
0982 };
0983 
0984 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
0985 
0986 
0987 /* ETF */
0988 struct tc_etf_qopt {
0989     __s32 delta;
0990     __s32 clockid;
0991     __u32 flags;
0992 #define TC_ETF_DEADLINE_MODE_ON BIT(0)
0993 #define TC_ETF_OFFLOAD_ON   BIT(1)
0994 };
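
/*
 * Illustrative sketch (not part of this header): an ETF configuration in the
 * spirit of tc-etf -- transmission times taken against CLOCK_TAI (clockid 11),
 * packets dequeued with 200 us of headroom, deadline mode enabled.  That
 * delta is expressed in nanoseconds is an assumption based on tc-etf usage.
 */
static const struct tc_etf_qopt etf_example = {
	.clockid = 11,		/* CLOCK_TAI */
	.delta   = 200000,	/* dequeue headroom, assumed ns */
	.flags   = 1U,		/* TC_ETF_DEADLINE_MODE_ON (bit 0) */
};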
0995 
0996 enum {
0997     TCA_ETF_UNSPEC,
0998     TCA_ETF_PARMS,
0999     __TCA_ETF_MAX,
1000 };
1001 
1002 #define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
1003 
1004 
1005 /* CAKE */
1006 enum {
1007     TCA_CAKE_UNSPEC,
1008     TCA_CAKE_PAD,
1009     TCA_CAKE_BASE_RATE64,
1010     TCA_CAKE_DIFFSERV_MODE,
1011     TCA_CAKE_ATM,
1012     TCA_CAKE_FLOW_MODE,
1013     TCA_CAKE_OVERHEAD,
1014     TCA_CAKE_RTT,
1015     TCA_CAKE_TARGET,
1016     TCA_CAKE_AUTORATE,
1017     TCA_CAKE_MEMORY,
1018     TCA_CAKE_NAT,
1019     TCA_CAKE_RAW,
1020     TCA_CAKE_WASH,
1021     TCA_CAKE_MPU,
1022     TCA_CAKE_INGRESS,
1023     TCA_CAKE_ACK_FILTER,
1024     TCA_CAKE_SPLIT_GSO,
1025     __TCA_CAKE_MAX
1026 };
1027 #define TCA_CAKE_MAX    (__TCA_CAKE_MAX - 1)
1028 
1029 enum {
1030     __TCA_CAKE_STATS_INVALID,
1031     TCA_CAKE_STATS_PAD,
1032     TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
1033     TCA_CAKE_STATS_MEMORY_LIMIT,
1034     TCA_CAKE_STATS_MEMORY_USED,
1035     TCA_CAKE_STATS_AVG_NETOFF,
1036     TCA_CAKE_STATS_MIN_NETLEN,
1037     TCA_CAKE_STATS_MAX_NETLEN,
1038     TCA_CAKE_STATS_MIN_ADJLEN,
1039     TCA_CAKE_STATS_MAX_ADJLEN,
1040     TCA_CAKE_STATS_TIN_STATS,
1041     TCA_CAKE_STATS_DEFICIT,
1042     TCA_CAKE_STATS_COBALT_COUNT,
1043     TCA_CAKE_STATS_DROPPING,
1044     TCA_CAKE_STATS_DROP_NEXT_US,
1045     TCA_CAKE_STATS_P_DROP,
1046     TCA_CAKE_STATS_BLUE_TIMER_US,
1047     __TCA_CAKE_STATS_MAX
1048 };
1049 #define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
1050 
1051 enum {
1052     __TCA_CAKE_TIN_STATS_INVALID,
1053     TCA_CAKE_TIN_STATS_PAD,
1054     TCA_CAKE_TIN_STATS_SENT_PACKETS,
1055     TCA_CAKE_TIN_STATS_SENT_BYTES64,
1056     TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
1057     TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
1058     TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
1059     TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
1060     TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
1061     TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
1062     TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
1063     TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
1064     TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
1065     TCA_CAKE_TIN_STATS_TARGET_US,
1066     TCA_CAKE_TIN_STATS_INTERVAL_US,
1067     TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
1068     TCA_CAKE_TIN_STATS_WAY_MISSES,
1069     TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
1070     TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
1071     TCA_CAKE_TIN_STATS_AVG_DELAY_US,
1072     TCA_CAKE_TIN_STATS_BASE_DELAY_US,
1073     TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
1074     TCA_CAKE_TIN_STATS_BULK_FLOWS,
1075     TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
1076     TCA_CAKE_TIN_STATS_MAX_SKBLEN,
1077     TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
1078     __TCA_CAKE_TIN_STATS_MAX
1079 };
1080 #define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
1081 #define TC_CAKE_MAX_TINS (8)
1082 
1083 enum {
1084     CAKE_FLOW_NONE = 0,
1085     CAKE_FLOW_SRC_IP,
1086     CAKE_FLOW_DST_IP,
1087     CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
1088     CAKE_FLOW_FLOWS,
1089     CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
1090     CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
1091     CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
1092     CAKE_FLOW_MAX,
1093 };
1094 
1095 enum {
1096     CAKE_DIFFSERV_DIFFSERV3 = 0,
1097     CAKE_DIFFSERV_DIFFSERV4,
1098     CAKE_DIFFSERV_DIFFSERV8,
1099     CAKE_DIFFSERV_BESTEFFORT,
1100     CAKE_DIFFSERV_PRECEDENCE,
1101     CAKE_DIFFSERV_MAX
1102 };
1103 
1104 enum {
1105     CAKE_ACK_NONE = 0,
1106     CAKE_ACK_FILTER,
1107     CAKE_ACK_AGGRESSIVE,
1108     CAKE_ACK_MAX
1109 };
1110 
1111 enum {
1112     CAKE_ATM_NONE = 0,
1113     CAKE_ATM_ATM,
1114     CAKE_ATM_PTM,
1115     CAKE_ATM_MAX
1116 };
1117 
1118 
1119 /* TAPRIO */
1120 enum {
1121     TC_TAPRIO_CMD_SET_GATES = 0x00,
1122     TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
1123     TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
1124 };
1125 
1126 enum {
1127     TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
1128     TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
1129     TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
1130     TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
1131     TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
1132     __TCA_TAPRIO_SCHED_ENTRY_MAX,
1133 };
1134 #define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
1135 
1136 /* The format for schedule entry list is:
1137  * [TCA_TAPRIO_SCHED_ENTRY_LIST]
1138  *   [TCA_TAPRIO_SCHED_ENTRY]
1139  *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
1140  *     [TCA_TAPRIO_SCHED_ENTRY_GATES]
1141  *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
1142  */
1143 enum {
1144     TCA_TAPRIO_SCHED_UNSPEC,
1145     TCA_TAPRIO_SCHED_ENTRY,
1146     __TCA_TAPRIO_SCHED_MAX,
1147 };
1148 
1149 #define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
1150 
1151 enum {
1152     TCA_TAPRIO_ATTR_UNSPEC,
1153     TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
1154     TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
1155     TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
1156     TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
1157     TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
1158     TCA_TAPRIO_PAD,
1159     __TCA_TAPRIO_ATTR_MAX,
1160 };
1161 
1162 #define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
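
/*
 * Illustrative sketch (not part of this header): a gate schedule that would
 * be encoded as one nested TCA_TAPRIO_SCHED_ENTRY per row inside
 * TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST.  The struct, field names and values are
 * hypothetical; they only make the CMD / GATE_MASK / INTERVAL layout
 * described above concrete.
 */
struct taprio_entry_example {
	__u8  cmd;		/* TCA_TAPRIO_SCHED_ENTRY_CMD */
	__u32 gate_mask;	/* TCA_TAPRIO_SCHED_ENTRY_GATE_MASK */
	__u32 interval;		/* TCA_TAPRIO_SCHED_ENTRY_INTERVAL */
};

static const struct taprio_entry_example taprio_cycle_example[] = {
	{ TC_TAPRIO_CMD_SET_GATES, 0x01, 300000 },	/* open the gate for TC 0 */
	{ TC_TAPRIO_CMD_SET_GATES, 0x06, 700000 },	/* open the gates for TCs 1-2 */
};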
1163 
1164 #endif