0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Copyright (C) 2013 Cisco Systems, Inc, 2013.
0003  *
0004  * Author: Vijay Subramanian <vijaynsu@cisco.com>
0005  * Author: Mythili Prabhu <mysuryan@cisco.com>
0006  *
0007  * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
0008  * University of Oslo, Norway.
0009  *
0010  * References:
0011  * RFC 8033: https://tools.ietf.org/html/rfc8033
0012  */
0013 
0014 #include <linux/module.h>
0015 #include <linux/slab.h>
0016 #include <linux/types.h>
0017 #include <linux/kernel.h>
0018 #include <linux/errno.h>
0019 #include <linux/skbuff.h>
0020 #include <net/pkt_sched.h>
0021 #include <net/inet_ecn.h>
0022 #include <net/pie.h>
0023 
0024 /* private data for the Qdisc */
0025 struct pie_sched_data {
0026     struct pie_vars vars;
0027     struct pie_params params;
0028     struct pie_stats stats;
0029     struct timer_list adapt_timer;
0030     struct Qdisc *sch;
0031 };
0032 
0033 bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
0034             struct pie_vars *vars, u32 backlog, u32 packet_size)
0035 {
0036     u64 rnd;
0037     u64 local_prob = vars->prob;
0038     u32 mtu = psched_mtu(qdisc_dev(sch));
0039 
0040     /* If there is still burst allowance left, skip random early drop */
0041     if (vars->burst_time > 0)
0042         return false;
0043 
0044     /* If current delay is less than half of target, and
0045      * if drop prob is low already, disable early_drop
0046      */
0047     if ((vars->qdelay < params->target / 2) &&
0048         (vars->prob < MAX_PROB / 5))
0049         return false;
0050 
0051     /* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
0052      * similar to min_th in RED
0053      */
0054     if (backlog < 2 * mtu)
0055         return false;
0056 
0057     /* If bytemode is turned on, use the packet size to compute the new
0058      * probability; smaller packets get a lower drop probability this way.
0059      */
0060     if (params->bytemode && packet_size <= mtu)
0061         local_prob = (u64)packet_size * div_u64(local_prob, mtu);
0062     else
0063         local_prob = vars->prob;
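    /* The scaling above works out to roughly prob * packet_size / mtu: with
     * an MTU of 1500 bytes, for example, a 300-byte packet would see about
     * one fifth of the full drop probability (the integer division of prob
     * by mtu makes this slightly approximate).
     */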
0064 
0065     if (local_prob == 0)
0066         vars->accu_prob = 0;
0067     else
0068         vars->accu_prob += local_prob;
0069 
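    /* De-randomization, along the lines described in RFC 8033: accu_prob
     * accumulates the drop probability over consecutive packets. Below an
     * accumulated value of 0.85 (85% of MAX_PROB) the packet is never
     * dropped early; at 8.5 and above (MAX_PROB / 2 * 17) a drop is forced.
     * In between, the random draw below decides.
     */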
0070     if (vars->accu_prob < (MAX_PROB / 100) * 85)
0071         return false;
0072     if (vars->accu_prob >= (MAX_PROB / 2) * 17)
0073         return true;
0074 
0075     prandom_bytes(&rnd, 8);
0076     if ((rnd >> BITS_PER_BYTE) < local_prob) {
0077         vars->accu_prob = 0;
0078         return true;
0079     }
0080 
0081     return false;
0082 }
0083 EXPORT_SYMBOL_GPL(pie_drop_early);
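/* pie_drop_early() (and the other pie_* helpers exported below) are shared
 * with the FQ-PIE qdisc (sch_fq_pie.c), which applies the same early-drop
 * decision per flow.
 */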
0084 
0085 static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
0086                  struct sk_buff **to_free)
0087 {
0088     struct pie_sched_data *q = qdisc_priv(sch);
0089     bool enqueue = false;
0090 
0091     if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
0092         q->stats.overlimit++;
0093         goto out;
0094     }
0095 
0096     if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
0097                 skb->len)) {
0098         enqueue = true;
0099     } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
0100            INET_ECN_set_ce(skb)) {
0101         /* If the packet is ECN capable, mark it when the drop
0102          * probability is lower than 10%, else drop it.
0103          */
0104         q->stats.ecn_mark++;
0105         enqueue = true;
0106     }
0107 
0108     /* we can enqueue the packet */
0109     if (enqueue) {
0110         /* Set enqueue time only when dq_rate_estimator is disabled. */
0111         if (!q->params.dq_rate_estimator)
0112             pie_set_enqueue_time(skb);
0113 
0114         q->stats.packets_in++;
0115         if (qdisc_qlen(sch) > q->stats.maxq)
0116             q->stats.maxq = qdisc_qlen(sch);
0117 
0118         return qdisc_enqueue_tail(skb, sch);
0119     }
0120 
0121 out:
0122     q->stats.dropped++;
0123     q->vars.accu_prob = 0;
0124     return qdisc_drop(skb, sch, to_free);
0125 }
0126 
0127 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
0128     [TCA_PIE_TARGET]        = {.type = NLA_U32},
0129     [TCA_PIE_LIMIT]         = {.type = NLA_U32},
0130     [TCA_PIE_TUPDATE]       = {.type = NLA_U32},
0131     [TCA_PIE_ALPHA]         = {.type = NLA_U32},
0132     [TCA_PIE_BETA]          = {.type = NLA_U32},
0133     [TCA_PIE_ECN]           = {.type = NLA_U32},
0134     [TCA_PIE_BYTEMODE]      = {.type = NLA_U32},
0135     [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
0136 };
0137 
0138 static int pie_change(struct Qdisc *sch, struct nlattr *opt,
0139               struct netlink_ext_ack *extack)
0140 {
0141     struct pie_sched_data *q = qdisc_priv(sch);
0142     struct nlattr *tb[TCA_PIE_MAX + 1];
0143     unsigned int qlen, dropped = 0;
0144     int err;
0145 
0146     if (!opt)
0147         return -EINVAL;
0148 
0149     err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
0150                       NULL);
0151     if (err < 0)
0152         return err;
0153 
0154     sch_tree_lock(sch);
0155 
0156     /* convert from microseconds to pschedtime */
0157     if (tb[TCA_PIE_TARGET]) {
0158         /* target is in us */
0159         u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
0160 
0161         /* convert to pschedtime */
0162         q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
0163     }
0164 
0165     /* tupdate is in jiffies */
0166     if (tb[TCA_PIE_TUPDATE])
0167         q->params.tupdate =
0168             usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
0169 
0170     if (tb[TCA_PIE_LIMIT]) {
0171         u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
0172 
0173         q->params.limit = limit;
0174         sch->limit = limit;
0175     }
0176 
0177     if (tb[TCA_PIE_ALPHA])
0178         q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);
0179 
0180     if (tb[TCA_PIE_BETA])
0181         q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);
0182 
0183     if (tb[TCA_PIE_ECN])
0184         q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);
0185 
0186     if (tb[TCA_PIE_BYTEMODE])
0187         q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);
0188 
0189     if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
0190         q->params.dq_rate_estimator =
0191                 nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);
0192 
0193     /* Drop excess packets if new limit is lower */
0194     qlen = sch->q.qlen;
0195     while (sch->q.qlen > sch->limit) {
0196         struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
0197 
0198         dropped += qdisc_pkt_len(skb);
0199         qdisc_qstats_backlog_dec(sch, skb);
0200         rtnl_qdisc_drop(skb, sch);
0201     }
0202     qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
0203 
0204     sch_tree_unlock(sch);
0205     return 0;
0206 }
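/* For illustration, the attributes parsed above correspond to the tc-pie
 * command-line options; a typical (hypothetical) invocation might look like:
 *
 *   tc qdisc add dev eth0 root pie limit 1000 target 15ms tupdate 15ms \
 *      alpha 2 beta 20 ecn
 *
 * target and tupdate are given as times, while alpha and beta use the 0-32
 * fixed-point range explained in pie_calculate_probability() below.
 */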
0207 
0208 void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
0209              struct pie_vars *vars, u32 backlog)
0210 {
0211     psched_time_t now = psched_get_time();
0212     u32 dtime = 0;
0213 
0214     /* If dq_rate_estimator is disabled, calculate qdelay using the
0215      * packet timestamp.
0216      */
0217     if (!params->dq_rate_estimator) {
0218         vars->qdelay = now - pie_get_enqueue_time(skb);
0219 
0220         if (vars->dq_tstamp != DTIME_INVALID)
0221             dtime = now - vars->dq_tstamp;
0222 
0223         vars->dq_tstamp = now;
0224 
0225         if (backlog == 0)
0226             vars->qdelay = 0;
0227 
0228         if (dtime == 0)
0229             return;
0230 
0231         goto burst_allowance_reduction;
0232     }
0233 
0234     /* If the current queue is about 10 packets or more and dq_count is
0235      * unset, we have enough packets to calculate the drain rate. Save the
0236      * current time as dq_tstamp and start a measurement cycle.
0237      */
0238     if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
0239         vars->dq_tstamp = psched_get_time();
0240         vars->dq_count = 0;
0241     }
0242 
0243     /* Calculate the average drain rate from this value. If the queue
0244      * length has receded to a small value (<= QUEUE_THRESHOLD bytes), reset
0245      * dq_count to -1, as we no longer have enough packets to calculate the
0246      * drain rate. The following if block is entered only when we have a
0247      * substantial queue built up (QUEUE_THRESHOLD bytes or more), and the
0248      * drain rate is calculated over that threshold. dq_count is in bytes,
0249      * the time difference is in psched_time, hence the rate is in
0250      * bytes/psched_time.
0251      */
0252     if (vars->dq_count != DQCOUNT_INVALID) {
0253         vars->dq_count += skb->len;
0254 
0255         if (vars->dq_count >= QUEUE_THRESHOLD) {
0256             u32 count = vars->dq_count << PIE_SCALE;
0257 
0258             dtime = now - vars->dq_tstamp;
0259 
0260             if (dtime == 0)
0261                 return;
0262 
0263             count = count / dtime;
0264 
0265             if (vars->avg_dq_rate == 0)
0266                 vars->avg_dq_rate = count;
0267             else
0268                 vars->avg_dq_rate =
0269                     (vars->avg_dq_rate -
0270                      (vars->avg_dq_rate >> 3)) + (count >> 3);
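            /* The update above is an exponentially weighted moving average
             * with weight 1/8: keep 7/8 of the previous avg_dq_rate and add
             * 1/8 of the new sample.
             */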
0271 
0272             /* If the queue has receded below the threshold, we hold
0273              * on to the last drain rate calculated, else we reset
0274              * dq_count to 0 to re-enter the if block when the next
0275              * packet is dequeued
0276              */
0277             if (backlog < QUEUE_THRESHOLD) {
0278                 vars->dq_count = DQCOUNT_INVALID;
0279             } else {
0280                 vars->dq_count = 0;
0281                 vars->dq_tstamp = psched_get_time();
0282             }
0283 
0284             goto burst_allowance_reduction;
0285         }
0286     }
0287 
0288     return;
0289 
0290 burst_allowance_reduction:
0291     if (vars->burst_time > 0) {
0292         if (vars->burst_time > dtime)
0293             vars->burst_time -= dtime;
0294         else
0295             vars->burst_time = 0;
0296     }
0297 }
0298 EXPORT_SYMBOL_GPL(pie_process_dequeue);
0299 
0300 void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
0301                    u32 backlog)
0302 {
0303     psched_time_t qdelay = 0;   /* in pschedtime */
0304     psched_time_t qdelay_old = 0;   /* in pschedtime */
0305     s64 delta = 0;      /* determines the change in probability */
0306     u64 oldprob;
0307     u64 alpha, beta;
0308     u32 power;
0309     bool update_prob = true;
0310 
0311     if (params->dq_rate_estimator) {
0312         qdelay_old = vars->qdelay;
0313         vars->qdelay_old = vars->qdelay;
0314 
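        /* With the rate estimator, delay is inferred from queued bytes
         * divided by the measured drain rate (a Little's-law style
         * estimate); the << PIE_SCALE below undoes the scaling applied
         * when avg_dq_rate was computed in pie_process_dequeue().
         */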
0315         if (vars->avg_dq_rate > 0)
0316             qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
0317         else
0318             qdelay = 0;
0319     } else {
0320         qdelay = vars->qdelay;
0321         qdelay_old = vars->qdelay_old;
0322     }
0323 
0324     /* If qdelay is zero and the backlog is not, the backlog is very small,
0325      * so we do not update the probability in this round.
0326      */
0327     if (qdelay == 0 && backlog != 0)
0328         update_prob = false;
0329 
0330     /* In the algorithm, alpha and beta are between 0 and 2, with a typical
0331      * value for alpha of 0.125. In this implementation, we use values 0-32
0332      * passed from user space to represent this. Also, alpha and beta have
0333      * units of HZ and need to be scaled before they can be used to update
0334      * the probability. alpha/beta are scaled down locally below by 16 to
0335      * come back to the 0-2 range.
0336      */
0337     alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
0338     beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
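    /* For example, with alpha = 2 and beta = 20 (the usual defaults from
     * pie_params_init()), the >> 4 above corresponds to gains of 0.125 and
     * 1.25 in the algorithm's 0-2 range; the MAX_PROB / PSCHED_TICKS_PER_SEC
     * factor converts a delay error in psched ticks into a probability delta.
     */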
0339 
0340     /* We scale alpha and beta differently depending on how heavy the
0341      * congestion is. Please see RFC 8033 for details.
0342      */
0343     if (vars->prob < MAX_PROB / 10) {
0344         alpha >>= 1;
0345         beta >>= 1;
0346 
0347         power = 100;
0348         while (vars->prob < div_u64(MAX_PROB, power) &&
0349                power <= 1000000) {
0350             alpha >>= 2;
0351             beta >>= 2;
0352             power *= 10;
0353         }
0354     }
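    /* The shifts above roughly mirror the auto-tuning table in RFC 8033:
     * below 10% drop probability alpha and beta are halved, and each further
     * factor-of-ten drop in probability scales them down by another factor
     * of four.
     */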
0355 
0356     /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
0357     delta += alpha * (qdelay - params->target);
0358     delta += beta * (qdelay - qdelay_old);
0359 
0360     oldprob = vars->prob;
0361 
0362     /* to ensure we increase probability in steps of no more than 2% */
0363     if (delta > (s64)(MAX_PROB / (100 / 2)) &&
0364         vars->prob >= MAX_PROB / 10)
0365         delta = (MAX_PROB / 100) * 2;
0366 
0367     /* Non-linear drop: tune the drop probability to increase quickly
0368      * for high delays (>= 250 ms). 250 ms is derived through experiments
0369      * and provides error protection.
0370      */
0371 
0372     if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
0373         delta += MAX_PROB / (100 / 2);
0374 
0375     vars->prob += delta;
0376 
0377     if (delta > 0) {
0378         /* prevent overflow */
0379         if (vars->prob < oldprob) {
0380             vars->prob = MAX_PROB;
0381             /* Prevent normalization error. If probability is at
0382              * maximum value already, we normalize it here, and
0383              * skip the check to do a non-linear drop in the next
0384              * section.
0385              */
0386             update_prob = false;
0387         }
0388     } else {
0389         /* prevent underflow */
0390         if (vars->prob > oldprob)
0391             vars->prob = 0;
0392     }
0393 
0394     /* Non-linear drop in probability: Reduce drop probability quickly if
0395      * delay is 0 for 2 consecutive Tupdate periods.
0396      */
0397 
0398     if (qdelay == 0 && qdelay_old == 0 && update_prob)
0399         /* Reduce drop probability to 98.4% of its current value */
0400         vars->prob -= vars->prob / 64;
0401 
0402     vars->qdelay = qdelay;
0403     vars->backlog_old = backlog;
0404 
0405     /* We restart the measurement cycle if the following conditions are met:
0406      * 1. The delay has been low for 2 consecutive Tupdate periods
0407      * 2. The calculated drop probability is zero
0408      * 3. If dq_rate_estimator is enabled, we have at least one estimate
0409      *    for avg_dq_rate, i.e., it is a non-zero value
0410      */
0411     if ((vars->qdelay < params->target / 2) &&
0412         (vars->qdelay_old < params->target / 2) &&
0413         vars->prob == 0 &&
0414         (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
0415         pie_vars_init(vars);
0416     }
0417 
0418     if (!params->dq_rate_estimator)
0419         vars->qdelay_old = qdelay;
0420 }
0421 EXPORT_SYMBOL_GPL(pie_calculate_probability);
0422 
0423 static void pie_timer(struct timer_list *t)
0424 {
0425     struct pie_sched_data *q = from_timer(q, t, adapt_timer);
0426     struct Qdisc *sch = q->sch;
0427     spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
0428 
0429     spin_lock(root_lock);
0430     pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
0431 
0432     /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
0433     if (q->params.tupdate)
0434         mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
0435     spin_unlock(root_lock);
0436 }
0437 
0438 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
0439             struct netlink_ext_ack *extack)
0440 {
0441     struct pie_sched_data *q = qdisc_priv(sch);
0442 
0443     pie_params_init(&q->params);
0444     pie_vars_init(&q->vars);
0445     sch->limit = q->params.limit;
0446 
0447     q->sch = sch;
0448     timer_setup(&q->adapt_timer, pie_timer, 0);
0449 
0450     if (opt) {
0451         int err = pie_change(sch, opt, extack);
0452 
0453         if (err)
0454             return err;
0455     }
0456 
0457     mod_timer(&q->adapt_timer, jiffies + HZ / 2);
0458     return 0;
0459 }
0460 
0461 static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
0462 {
0463     struct pie_sched_data *q = qdisc_priv(sch);
0464     struct nlattr *opts;
0465 
0466     opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
0467     if (!opts)
0468         goto nla_put_failure;
0469 
0470     /* convert target from pschedtime to us */
0471     if (nla_put_u32(skb, TCA_PIE_TARGET,
0472             ((u32)PSCHED_TICKS2NS(q->params.target)) /
0473             NSEC_PER_USEC) ||
0474         nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
0475         nla_put_u32(skb, TCA_PIE_TUPDATE,
0476             jiffies_to_usecs(q->params.tupdate)) ||
0477         nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
0478         nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
0479         nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
0480         nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
0481         nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
0482             q->params.dq_rate_estimator))
0483         goto nla_put_failure;
0484 
0485     return nla_nest_end(skb, opts);
0486 
0487 nla_put_failure:
0488     nla_nest_cancel(skb, opts);
0489     return -1;
0490 }
0491 
0492 static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
0493 {
0494     struct pie_sched_data *q = qdisc_priv(sch);
0495     struct tc_pie_xstats st = {
0496         .prob       = q->vars.prob << BITS_PER_BYTE,
0497         .delay      = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
0498                    NSEC_PER_USEC,
0499         .packets_in = q->stats.packets_in,
0500         .overlimit  = q->stats.overlimit,
0501         .maxq       = q->stats.maxq,
0502         .dropped    = q->stats.dropped,
0503         .ecn_mark   = q->stats.ecn_mark,
0504     };
0505 
0506     /* avg_dq_rate is only valid if dq_rate_estimator is enabled */
0507     st.dq_rate_estimating = q->params.dq_rate_estimator;
0508 
0509     /* unscale and return dq_rate in bytes per sec */
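    /* avg_dq_rate is kept in (bytes << PIE_SCALE) per psched tick, so
     * multiplying by PSCHED_TICKS_PER_SEC and shifting right by PIE_SCALE
     * yields bytes per second.
     */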
0510     if (q->params.dq_rate_estimator)
0511         st.avg_dq_rate = q->vars.avg_dq_rate *
0512                  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;
0513 
0514     return gnet_stats_copy_app(d, &st, sizeof(st));
0515 }
0516 
0517 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
0518 {
0519     struct pie_sched_data *q = qdisc_priv(sch);
0520     struct sk_buff *skb = qdisc_dequeue_head(sch);
0521 
0522     if (!skb)
0523         return NULL;
0524 
0525     pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
0526     return skb;
0527 }
0528 
0529 static void pie_reset(struct Qdisc *sch)
0530 {
0531     struct pie_sched_data *q = qdisc_priv(sch);
0532 
0533     qdisc_reset_queue(sch);
0534     pie_vars_init(&q->vars);
0535 }
0536 
0537 static void pie_destroy(struct Qdisc *sch)
0538 {
0539     struct pie_sched_data *q = qdisc_priv(sch);
0540 
0541     q->params.tupdate = 0;
0542     del_timer_sync(&q->adapt_timer);
0543 }
0544 
0545 static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
0546     .id     = "pie",
0547     .priv_size  = sizeof(struct pie_sched_data),
0548     .enqueue    = pie_qdisc_enqueue,
0549     .dequeue    = pie_qdisc_dequeue,
0550     .peek       = qdisc_peek_dequeued,
0551     .init       = pie_init,
0552     .destroy    = pie_destroy,
0553     .reset      = pie_reset,
0554     .change     = pie_change,
0555     .dump       = pie_dump,
0556     .dump_stats = pie_dump_stats,
0557     .owner      = THIS_MODULE,
0558 };
0559 
0560 static int __init pie_module_init(void)
0561 {
0562     return register_qdisc(&pie_qdisc_ops);
0563 }
0564 
0565 static void __exit pie_module_exit(void)
0566 {
0567     unregister_qdisc(&pie_qdisc_ops);
0568 }
0569 
0570 module_init(pie_module_init);
0571 module_exit(pie_module_exit);
0572 
0573 MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
0574 MODULE_AUTHOR("Vijay Subramanian");
0575 MODULE_AUTHOR("Mythili Prabhu");
0576 MODULE_LICENSE("GPL");