// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/sched/loadavg.c
 *
 * This file contains the magic bits required to compute the global loadavg
 * figure. It's a silly number but people think it's important. We take
 * great pains to make it work on big machines and tickless kernels.
 */

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *   nr_active = 0;
 *   for_each_possible_cpu(cpu)
 *	nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *   avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * For a number of reasons, the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of CPUs, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So, assuming nr_active := 0 when we start out -- true by definition -- we
 *    can simply take per-CPU deltas and fold those into a global accumulator
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-CPU delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    CPU to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, it just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
 *    this would add another cross-CPU cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever CPU the task ran
 *    when it went into uninterruptible state and decrement on whatever CPU
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all CPUs yields the correct result.
 *
 *  This covers the NO_HZ=n code; for the extra headaches, see the comment below.
 */
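
/*
 * Illustrative sketch, not part of the original file: the centralized
 * version of the pseudo-code above, written against helpers this file's
 * build context already provides (cpu_rq(), for_each_possible_cpu(), and
 * calc_load()/FIXED_1/EXP_* plus the avenrun[] declaration from the shared
 * loadavg header). Nothing calls it; the rest of this file exists precisely
 * to avoid walking every possible CPU from one place.
 */
static void __maybe_unused calc_global_load_naive(void)
{
	long nr_active = 0;
	int cpu;

	/* O(nr_cpus) walk -- too expensive on large machines */
	for_each_possible_cpu(cpu)
		nr_active += cpu_rq(cpu)->nr_running +
			     (int)cpu_rq(cpu)->nr_uninterruptible;

	if (nr_active < 0)
		nr_active = 0;

	/* One decay step per LOAD_FREQ window, in FIXED_1 fixed point */
	avenrun[0] = calc_load(avenrun[0], EXP_1, nr_active * FIXED_1);
	avenrun[1] = calc_load(avenrun[1], EXP_5, nr_active * FIXED_1);
	avenrun[2] = calc_load(avenrun[2], EXP_15, nr_active * FIXED_1);
}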

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:  pointer to dest load array
 * @offset: offset to add
 * @shift:  shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}
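
/*
 * Illustrative usage sketch, not part of the original file: roughly how a
 * consumer such as /proc/loadavg presents these values, assuming the
 * LOAD_INT()/LOAD_FRAC() helpers from the loadavg header. The FIXED_1/200
 * offset rounds the two-decimal fraction.
 */
static void __maybe_unused print_avenrun_example(void)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);
	pr_info("loadavg: %lu.%02lu %lu.%02lu %lu.%02lu\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}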

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running - adjust;
	nr_active += (int)this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) {
		for (;;) {
			if (n & 1) {
				result *= x;
				result += 1UL << (frac_bits - 1);
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}
	}

	return result;
}
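
/*
 * Worked example, not part of the original file, assuming FSHIFT = 11 and
 * EXP_1 = 1884 from the loadavg header, i.e. e ~= 1884/2048 ~= 0.920 in
 * fixed point:
 *
 *	fixed_power_int(EXP_1, FSHIFT, 0) == 2048	(x^0 == FIXED_1 == 1.0)
 *	fixed_power_int(EXP_1, FSHIFT, 1) == 1884	(~0.920 * FIXED_1)
 *	fixed_power_int(EXP_1, FSHIFT, 2) == 1733	(~0.846 * FIXED_1)
 *
 * This is the aggregate decay factor calc_load_n() below applies to the
 * 1-minute average when it has to catch up n LOAD_FREQ windows in one go.
 */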

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0          1 - x
 */
unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
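
/*
 * Reference sketch, not part of the original file: the recurrence above,
 * a_{k+1} = a_k * e + a * (1 - e), applied one window at a time with the
 * calc_load() helper from the loadavg header. Up to fixed-point rounding,
 * n iterations of this loop give the same result that calc_load_n()
 * obtains in a single step via e^n.
 */
static unsigned long __maybe_unused
calc_load_n_by_iteration(unsigned long load, unsigned long exp,
			 unsigned long active, unsigned int n)
{
	while (n--)
		load = calc_load(load, exp, active);

	return load;
}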

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the distributed algorithm described above relies on per-CPU sampling
 * from the tick to compute the global load-average, it is affected by NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' CPU delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two NO_HZ-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old NO_HZ contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
 *    intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
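
/*
 * Illustrative walk-through, not part of the original file, of the diagram
 * above. Consider the boundary at 5s, assuming calc_load_update points at
 * that boundary and calc_load_idx is still odd from the previous flip:
 *
 *  - during [5s, 5s+10) a CPU entering NO_HZ sees jiffies past
 *    calc_load_update, so calc_load_write_idx() returns idx + 1 and its
 *    delta lands in the 'new' slot (w:0);
 *
 *  - at 5s+10 calc_global_load() still reads the 'old' slot (r:1), i.e. the
 *    deltas accumulated during the previous window;
 *
 *  - calc_global_nohz() then advances calc_load_update and bumps
 *    calc_load_idx, so reader and writer agree on slot 0 until the next
 *    boundary.
 */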
static atomic_long_t calc_load_nohz[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(); if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next NO_HZ-delta.
	 */
	if (!time_before(jiffies, READ_ONCE(calc_load_update)))
		idx++;

	return idx & 1;
}

static inline int calc_load_read_idx(void)
{
	return calc_load_idx & 1;
}

static void calc_load_nohz_fold(struct rq *rq)
{
	long delta;

	delta = calc_load_fold_active(rq, 0);
	if (delta) {
		int idx = calc_load_write_idx();

		atomic_long_add(delta, &calc_load_nohz[idx]);
	}
}

void calc_load_nohz_start(void)
{
	/*
	 * We're going into NO_HZ mode; if there's any pending delta, fold it
	 * into the pending NO_HZ delta.
	 */
	calc_load_nohz_fold(this_rq());
}

/*
 * Keep track of the load for NOHZ_FULL; must be called between
 * calc_load_nohz_{start,stop}().
 */
void calc_load_nohz_remote(struct rq *rq)
{
	calc_load_nohz_fold(rq);
}

void calc_load_nohz_stop(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the pending sample window, we're done.
	 */
	this_rq->calc_load_update = READ_ONCE(calc_load_update);
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window; this means we're already
	 * accounted for through the NO_HZ accounting, so skip the entire deal
	 * and sync up for the next window.
	 */
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}
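
/*
 * Illustrative call pattern, not part of the original file: per the comments
 * above, the NO_HZ tick code is expected to bracket a tickless period roughly
 * like this, with calc_load_nohz_remote() used for a remote tick on a
 * NOHZ_FULL CPU in between:
 *
 *	calc_load_nohz_start();		// stopping the local tick
 *	...				// CPU runs tickless
 *	calc_load_nohz_stop();		// restarting the local tick
 */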

static long calc_load_nohz_read(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_nohz[idx]))
		delta = atomic_long_xchg(&calc_load_nohz[idx], 0);

	return delta;
}

/*
 * NO_HZ can leave us missing all per-CPU ticks calling
 * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
 * calc_load_nohz via calc_load_nohz_start(), all we need to do is fold
 * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
	unsigned long sample_window;
	long delta, active, n;

	sample_window = READ_ONCE(calc_load_update);
	if (!time_before(jiffies, sample_window + 10)) {
		/*
		 * Catch up: fold in however many windows we are still behind.
		 */
		delta = jiffies - sample_window - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
	}

	/*
	 * Flip the NO_HZ index...
	 *
	 * Make sure we first write the new time and then flip the index, so
	 * that calc_load_write_idx() will see the new time when it reads the
	 * new index; this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}
#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_nohz_read(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 *
 * Called from the global timer code.
 */
void calc_global_load(void)
{
	unsigned long sample_window;
	long active, delta;

	sample_window = READ_ONCE(calc_load_update);
	if (time_before(jiffies, sample_window + 10))
		return;

	/*
	 * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
	 */
	delta = calc_load_nohz_read();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);

	/*
	 * In case we went into NO_HZ for multiple LOAD_FREQ intervals,
	 * catch up in bulk.
	 */
	calc_global_nohz();
}
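
/*
 * Worked numbers, not part of the original file, assuming the fixed-point
 * constants from the loadavg header (FSHIFT = 11, FIXED_1 = 2048,
 * EXP_1 = 1884, EXP_5 = 2014, EXP_15 = 2037, LOAD_FREQ = 5*HZ+1):
 *
 *	EXP_1  ~= FIXED_1 / e^(5s/1min)   ->  1-minute average
 *	EXP_5  ~= FIXED_1 / e^(5s/5min)   ->  5-minute average
 *	EXP_15 ~= FIXED_1 / e^(5s/15min)  -> 15-minute average
 *
 * Starting from avenrun[0] == 0 with one task runnable for a whole window
 * (active == FIXED_1), a single update above yields roughly
 * FIXED_1 * (1 - e^(-1/12)) ~= 164, which /proc/loadavg shows as 0.08.
 */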

/*
 * Called from scheduler_tick() to periodically update this CPU's
 * active count.
 */
void calc_global_load_tick(struct rq *this_rq)
{
	long delta;

	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta = calc_load_fold_active(this_rq, 0);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}