// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is the number of events of type j in the i-th last time
 * period and x_i is the total number of events in the i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event type j lazily, when someone asks for the proportion of event j or
 * when an event of type j occurs. This can be trivially implemented by
 * remembering the last period in which something happened with the
 * proportion of type j.
 */
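
/*
 * Worked example of the formula above (illustrative numbers, not from any
 * real workload): suppose type 'j' saw 8 events in the current period and
 * 4 in the previous one, while the totals were 16 and 8. Then
 *
 *   p_{j} = (8/2 + 4/4) / (16/2 + 8/4) = 5/10 = 0.5
 *
 * Every newly declared period halves all existing contributions, so a type
 * that stops generating events sees its proportion decay towards zero by a
 * factor of two per period.
 */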
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}
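
/*
 * Minimal lifecycle sketch (hypothetical caller in process context; error
 * handling elided):
 *
 *	struct fprop_global fg;
 *
 *	if (fprop_global_init(&fg, GFP_KERNEL))
 *		return -ENOMEM;
 *	...count events and read fractions...
 *	fprop_global_destroy(&fg);
 */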

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events (the counter was
	 * initialised to 1, so <= 1 means none happened).
	 */
	if (events <= 1)
		return false;
	write_seqcount_begin(&p->sequence);
	/*
	 * Shifting right by 'periods' halves the count once per declared
	 * period; for 64 or more periods everything decays to zero.
	 */
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);

	return true;
}
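
/*
 * Aging sketch (hypothetical; e.g. mm/page-writeback.c drives aging of its
 * completion proportions from a timer in a similar way). Once
 * fprop_new_period() returns false, all history has decayed away, so the
 * caller may stop periodic aging until new events arrive:
 *
 *	if (!fprop_new_period(&fg, missed_periods + 1))
 *		;	// nothing left to age - pause the timer
 */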

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
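
/*
 * Worked example of the lazy catch-up above (illustrative): if pl->period
 * is 3 while the global period is 5, two periods have passed without local
 * activity, so pl->events is shifted right by 2 (halved twice) and
 * pl->period is then set to 5.
 */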

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
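
/*
 * Usage sketch (hypothetical): scale the recent share of 'pl' events to a
 * percentage. The clamping above guarantees den > 0 and num <= den:
 *
 *	unsigned long num, den, pct;
 *
 *	fprop_fraction_single(&fg, &pl, &num, &den);
 *	pct = num * 100 / den;
 */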

/*
 * ---- PERCPU ----
 */
/* Batch size for percpu event counting, scaled with the number of CPUs */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		/*
		 * For small counts the approximate read can be off by more
		 * than the count itself, so fall back to an exact sum.
		 */
		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
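
/*
 * Worked example for the read-vs-sum choice above (illustrative): with
 * nr_cpu_ids == 4, PROP_BATCH is 8 * (1 + ilog2(4)) == 24, so
 * percpu_counter_read() may be off by up to 4 * 24 == 96 events. Shifting
 * such an inaccurate value right could zero out events that do exist, hence
 * the exact percpu_counter_sum() when the approximate count is small.
 */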

/* nr events of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * Like __fprop_add_percpu() except that events are counted only if the given
 * type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Would adding 'nr' push the fraction over max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
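
/*
 * Worked example for the saturation arithmetic above (illustrative numbers,
 * assuming FPROP_FRAC_SHIFT == 10, i.e. FPROP_FRAC_BASE == 1024, as in
 * linux/flex_proportions.h): with max_frac == 512 (a 50% cap),
 * numerator == 400, denominator == 1000 and nr == 300:
 *
 *   tmp = 1000 * 512 - (400 << 10) = 512000 - 409600 = 102400
 *
 * Adding all 300 events would require nr * (1024 - 512) == 153600 <= tmp to
 * stay under the cap, so nr is clipped to DIV_ROUND_UP(102400, 512) == 200,
 * which lands the fraction exactly at (400 + 200) / (1000 + 200) == 0.5.
 */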