// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure proportion
 * of each type of event over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. A formula
 * expressing proportion of event of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1}) / (\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be computed by maintaining the denominator (call it 'd')
 * and, for each event type, its numerator (call it 'n_j'). When an event of
 * type 'j' happens, we simply need to do: n_j++; d++;
 *
 * When a new period is declared, we could do: d /= 2; n_j /= 2 for each j.
 * To avoid iterating over all event types on each period transition, we
 * instead shift each numerator lazily, when someone asks for the proportion
 * of event j or when an event of type j occurs. For that we remember the
 * last period in which something happened with proportion of type j.
 */
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}
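
/*
 * Example (illustrative sketch, not part of this file): a caller pairing
 * fprop_global_init() with fprop_global_destroy(). The names 'my_prop',
 * my_prop_setup() and my_prop_teardown() are hypothetical.
 *
 *	static struct fprop_global my_prop;
 *
 *	static int my_prop_setup(void)
 *	{
 *		return fprop_global_init(&my_prop, GFP_KERNEL);
 *	}
 *
 *	static void my_prop_teardown(void)
 *	{
 *		fprop_global_destroy(&my_prop);
 *	}
 */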

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);

	return true;
}
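
/*
 * Example (illustrative sketch): aging the proportions periodically from a
 * single context, as fprop_new_period() requires. The work item 'age_work',
 * the callback age_fn() and the 3*HZ interval are hypothetical.
 *
 *	static void age_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(age_work, age_fn);
 *
 *	static void age_fn(struct work_struct *work)
 *	{
 *		if (fprop_new_period(&my_prop, 1))
 *			schedule_delayed_work(&age_work, 3 * HZ);
 *	}
 *
 * Rescheduling only while fprop_new_period() returns true stops the aging
 * work once all events have decayed to zero.
 */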

/*
 * ---- SINGLE ----
 */
int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl as @numerator / @denominator */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
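
/*
 * Example (illustrative sketch): counting events of one type and reading
 * back its share. 'my_prop' and 'my_single' are hypothetical objects set up
 * with fprop_global_init() and fprop_local_init_single(); 'total_budget' is
 * a hypothetical quantity to be split proportionally.
 *
 *	unsigned long num, den, share;
 *
 *	__fprop_inc_single(&my_prop, &my_single);
 *	...
 *	fprop_fraction_single(&my_prop, &my_single, &num, &den);
 *	share = (u64)total_budget * num / den;
 */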

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened @nr times */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
			long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
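
/*
 * Example (illustrative sketch): dividing a global limit among contenders in
 * proportion to their recent activity, in the spirit of how writeback splits
 * the dirty limit between devices. 'limit', 'my_prop' and 'my_percpu' are
 * hypothetical.
 *
 *	unsigned long num, den;
 *	u64 share;
 *
 *	fprop_fraction_percpu(&my_prop, &my_percpu, &num, &den);
 *	share = (u64)limit * num;
 *	do_div(share, den);
 *
 * The percpu flavour batches local counter updates (PROP_BATCH), so it is
 * the right choice when events are counted on hot paths from many CPUs.
 */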

/*
 * Like __fprop_add_percpu() except that events are counted only if the given
 * type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
			    struct fprop_local_percpu *pl, int max_frac,
			    long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* How many events can we count before hitting @max_frac? */
		tmp = (u64)denominator * max_frac -
			((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough events for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
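
/*
 * Example (illustrative sketch): capping one event type at 25% of all
 * events. FPROP_FRAC_BASE comes from <linux/flex_proportions.h>; 'my_prop'
 * and 'my_percpu' are hypothetical.
 *
 *	int max_frac = FPROP_FRAC_BASE / 4;
 *
 *	__fprop_add_percpu_max(&my_prop, &my_percpu, max_frac, 16);
 *
 * If counting all 16 events would push this type's fraction past
 * max_frac/FPROP_FRAC_BASE, only enough of them are counted for the
 * fraction to saturate at the cap; if the cap is already exceeded, none are.
 */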