// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_estimator.c	Simple rate estimator.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Eric Dumazet <edumazet@google.com>
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/* This code is NOT intended to be used for statistics collection,
 * its purpose is to provide a base for statistical multiplexing
 * for controlled load service.
 * If you need only statistics, run a user level daemon which
 * periodically reads byte counters.
 */

struct net_rate_estimator {
	struct gnet_stats_basic_sync	*bstats;
	spinlock_t			*stats_lock;
	bool				running;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	u8				ewma_log;
	u8				intvl_log; /* period : (250ms << intvl_log) */

	seqcount_t			seq;
	u64				last_packets;
	u64				last_bytes;

	u64				avpps;
	u64				avbps;

	unsigned long			next_jiffies;
	struct timer_list		timer;
	struct rcu_head			rcu;
};
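
/* Note on units, as implied by est_timer() and gen_estimator_read() below:
 * avbps and avpps hold the EWMA-smoothed rates in <<8 fixed point
 * (bytes/sec << 8 and packets/sec << 8); readers undo this with ">> 8".
 */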

static void est_fetch_counters(struct net_rate_estimator *e,
			       struct gnet_stats_basic_sync *b)
{
	gnet_stats_basic_sync_init(b);
	if (e->stats_lock)
		spin_lock(e->stats_lock);

	gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running);

	if (e->stats_lock)
		spin_unlock(e->stats_lock);
}

static void est_timer(struct timer_list *t)
{
	struct net_rate_estimator *est = from_timer(est, t, timer);
	struct gnet_stats_basic_sync b;
	u64 b_bytes, b_packets;
	u64 rate, brate;

	est_fetch_counters(est, &b);
	b_bytes = u64_stats_read(&b.bytes);
	b_packets = u64_stats_read(&b.packets);

	/* Compute the <<8 scaled rate samples, then the EWMA deltas:
	 * new_avg = old_avg + (sample - old_avg) >> ewma_log
	 */
	brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
	brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);

	rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
	rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);

	write_seqcount_begin(&est->seq);
	est->avbps += brate;
	est->avpps += rate;
	write_seqcount_end(&est->seq);

	est->last_bytes = b_bytes;
	est->last_packets = b_packets;

	est->next_jiffies += ((HZ/4) << est->intvl_log);

	if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
		/* Ouch... timer was delayed; resync to the current time. */
		est->next_jiffies = jiffies + 1;
	}
	mod_timer(&est->timer, est->next_jiffies);
}
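
/* Worked example (illustrative): with intvl_log = 2 the timer fires every
 * (HZ/4) << 2 = HZ jiffies, i.e. once per second.  If 1000 bytes arrived in
 * that second, the instantaneous rate sample is 1000 << (10 - 2) =
 * 1000 << 8, i.e. 1000 B/s in the <<8 fixed-point scale that
 * gen_estimator_read() removes with "avbps >> 8".
 */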

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: true if @bstats represents a running qdisc, thus @bstats'
 *           internal values might change during basic reads
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      bool running,
		      struct nlattr *opt)
{
	struct gnet_estimator *parm = nla_data(opt);
	struct net_rate_estimator *old, *est;
	struct gnet_stats_basic_sync b;
	int intvl_log;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	/* allowed timer periods, from parm->interval :
	 * -2 : 250ms,  -1 : 500ms,  0 : 1 sec,
	 *  1 : 2 sec,   2 : 4 sec,  3 : 8 sec
	 */
	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	if (parm->ewma_log == 0 || parm->ewma_log >= 31)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (!est)
		return -ENOBUFS;

	seqcount_init(&est->seq);
	intvl_log = parm->interval + 2;
	est->bstats = bstats;
	est->stats_lock = lock;
	est->running = running;
	est->ewma_log = parm->ewma_log;
	est->intvl_log = intvl_log;
	est->cpu_bstats = cpu_bstats;

	if (lock)
		local_bh_disable();
	est_fetch_counters(est, &b);
	if (lock)
		local_bh_enable();
	est->last_bytes = u64_stats_read(&b.bytes);
	est->last_packets = u64_stats_read(&b.packets);

	if (lock)
		spin_lock_bh(lock);
	old = rcu_dereference_protected(*rate_est, 1);
	if (old) {
		del_timer_sync(&old->timer);
		est->avbps = old->avbps;
		est->avpps = old->avpps;
	}

	est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
	timer_setup(&est->timer, est_timer, 0);
	mod_timer(&est->timer, est->next_jiffies);

	rcu_assign_pointer(*rate_est, est);
	if (lock)
		spin_unlock_bh(lock);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
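
/* Usage sketch (illustrative only; "struct my_obj" and its fields are
 * hypothetical): a typical caller such as a qdisc attaches an estimator
 * from the TCA_RATE netlink attribute, whose payload is a
 * struct gnet_estimator { signed char interval; unsigned char ewma_log; }.
 */
#if 0
static int example_attach_estimator(struct my_obj *obj, struct nlattr *opt)
{
	return gen_new_estimator(&obj->bstats, NULL, &obj->rate_est,
				 &obj->lock, false, opt);
}
#endif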

/**
 * gen_kill_estimator - remove a rate estimator
 * @rate_est: rate estimator
 *
 * Removes the rate estimator.
 */
void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
	struct net_rate_estimator *est;

	est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
	if (est) {
		del_timer_sync(&est->timer);
		kfree_rcu(est, rcu);
	}
}
EXPORT_SYMBOL(gen_kill_estimator);
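
/* Teardown note (added commentary): after gen_kill_estimator(&obj->rate_est),
 * readers already inside rcu_read_lock() may still dereference the old
 * estimator until a grace period elapses, which is why the structure is
 * freed with kfree_rcu() rather than kfree().
 */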

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: true if @bstats represents a running qdisc, thus @bstats'
 *           internal values might change during basic reads
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_new_estimator() to create a new one.
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **rate_est,
			  spinlock_t *lock,
			  bool running, struct nlattr *opt)
{
	return gen_new_estimator(bstats, cpu_bstats, rate_est,
				 lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @rate_est: rate estimator
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
{
	return !!rcu_access_pointer(*rate_est);
}
EXPORT_SYMBOL(gen_estimator_active);

bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
			struct gnet_stats_rate_est64 *sample)
{
	struct net_rate_estimator *est;
	unsigned int seq;

	rcu_read_lock();
	est = rcu_dereference(*rate_est);
	if (!est) {
		rcu_read_unlock();
		return false;
	}

	/* Retry the read if est_timer() updated avbps/avpps concurrently. */
	do {
		seq = read_seqcount_begin(&est->seq);
		sample->bps = est->avbps >> 8;
		sample->pps = est->avpps >> 8;
	} while (read_seqcount_retry(&est->seq, seq));

	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(gen_estimator_read);
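
/* Reader-side sketch (illustrative; "struct my_obj" is hypothetical):
 * sampling the smoothed rates, e.g. when filling netlink dump statistics.
 */
#if 0
static void example_dump_rate(struct my_obj *obj)
{
	struct gnet_stats_rate_est64 sample;

	if (gen_estimator_read(&obj->rate_est, &sample))
		pr_debug("est: %llu Bps, %llu pps\n", sample.bps, sample.pps);
}
#endif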