0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/types.h>
0013 #include <linux/kernel.h>
0014 #include <linux/module.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/socket.h>
0017 #include <linux/rtnetlink.h>
0018 #include <linux/gen_stats.h>
0019 #include <net/netlink.h>
0020 #include <net/gen_stats.h>
0021 #include <net/sch_generic.h>
0022
0023 static inline int
0024 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
0025 {
0026 if (nla_put_64bit(d->skb, type, size, buf, padattr))
0027 goto nla_put_failure;
0028 return 0;
0029
0030 nla_put_failure:
0031 if (d->lock)
0032 spin_unlock_bh(d->lock);
0033 kfree(d->xstats);
0034 d->xstats = NULL;
0035 d->xstats_len = 0;
0036 return -1;
0037 }
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock, may be NULL
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for
 * all other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode
 * telling all gnet_stats_copy_XXX() functions to fill a local copy of
 * struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not
 * sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so further attributes are properly nested.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
/**
 * gnet_stats_start_copy - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock, may be NULL
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for
 * all other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not
 * sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	/* Zero compat TLV types disable the backward-compatibility copies. */
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
0116
0117
/**
 * gnet_stats_basic_sync_init - initialize a gnet_stats_basic_sync structure
 * @b: basic stats structure to initialize
 *
 * Zeroes the bytes and packets counters and initializes the u64_stats
 * sync point guarding them.
 */
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{
	u64_stats_set(&b->bytes, 0);
	u64_stats_set(&b->packets, 0);
	u64_stats_init(&b->syncp);
}
EXPORT_SYMBOL(gnet_stats_basic_sync_init);
0125
/* Fold the per-CPU byte/packet counters of @cpu into @bstats.  Each CPU's
 * pair is sampled under its u64_stats sequence counter so the two values
 * are consistent with respect to a concurrent writer on that CPU.
 */
static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
				     struct gnet_stats_basic_sync __percpu *cpu)
{
	u64 t_bytes = 0, t_packets = 0;
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes, packets;

		/* Retry until a stable snapshot of this CPU's pair is read. */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = u64_stats_read(&bcpu->bytes);
			packets = u64_stats_read(&bcpu->packets);
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		t_bytes += bytes;
		t_packets += packets;
	}
	_bstats_update(bstats, t_bytes, t_packets);
}
0148
/**
 * gnet_stats_add_basic - add basic statistics
 * @bstats: destination gnet_stats_basic_sync
 * @cpu: possible per-cpu statistics, may be NULL
 * @b: possible statistics, used only when @cpu is NULL
 * @running: true if @b represents a running qdisc, thus @b's internal
 *           values might change during basic reads.  Only used if @cpu
 *           is NULL
 *
 * Must not be called from hard-IRQ context when @cpu is set or @running
 * is true (enforced by the WARN below).
 */
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running)
{
	unsigned int start;
	u64 bytes = 0;
	u64 packets = 0;

	WARN_ON_ONCE((cpu || running) && in_hardirq());

	if (cpu) {
		gnet_stats_add_basic_cpu(bstats, cpu);
		return;
	}
	/* When @running, retry under the seqcount until the pair is stable;
	 * otherwise @b is assumed not to change and a single read suffices.
	 */
	do {
		if (running)
			start = u64_stats_fetch_begin_irq(&b->syncp);
		bytes = u64_stats_read(&b->bytes);
		packets = u64_stats_read(&b->packets);
	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));

	_bstats_update(bstats, bytes, packets);
}
EXPORT_SYMBOL(gnet_stats_add_basic);
0173
/* Read a consistent bytes/packets snapshot into *ret_bytes/*ret_packets:
 * either the sum over the per-CPU counters in @cpu (when non-NULL), or a
 * single sample of @b.  @running selects the seqcount retry protocol for
 * the non-per-CPU case; with @running false, @b is read without retries.
 */
static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
				  struct gnet_stats_basic_sync __percpu *cpu,
				  struct gnet_stats_basic_sync *b, bool running)
{
	unsigned int start;

	if (cpu) {
		u64 t_bytes = 0, t_packets = 0;
		int i;

		for_each_possible_cpu(i) {
			struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
			unsigned int start;
			u64 bytes, packets;

			/* Stable per-CPU snapshot under the seqcount. */
			do {
				start = u64_stats_fetch_begin_irq(&bcpu->syncp);
				bytes = u64_stats_read(&bcpu->bytes);
				packets = u64_stats_read(&bcpu->packets);
			} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

			t_bytes += bytes;
			t_packets += packets;
		}
		*ret_bytes = t_bytes;
		*ret_packets = t_packets;
		return;
	}
	do {
		if (running)
			start = u64_stats_fetch_begin_irq(&b->syncp);
		*ret_bytes = u64_stats_read(&b->bytes);
		*ret_packets = u64_stats_read(&b->packets);
	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
}
0209
/* Common worker for gnet_stats_copy_basic()/gnet_stats_copy_basic_hw():
 * read the counters, mirror them into the compat tc_stats when requested,
 * and append a TLV of @type.  The legacy struct gnet_stats_basic carries
 * only a 32-bit packet count; when the real 64-bit count does not fit,
 * an additional TCA_STATS_PKT64 attribute with the full value is emitted.
 */
static int
___gnet_stats_copy_basic(struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b,
			 int type, bool running)
{
	u64 bstats_bytes, bstats_packets;

	gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats_bytes;
		d->tc_stats.packets = bstats_packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;
		int res;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats_bytes;
		sb.packets = bstats_packets;
		res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
		/* sb.packets != bstats_packets means the 32-bit field
		 * truncated the count; emit the 64-bit attribute too.
		 */
		if (res < 0 || sb.packets == bstats_packets)
			return res;

		return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
				       sizeof(bstats_packets), TCA_STATS_PAD);
	}
	return 0;
}
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu, may be NULL
 * @b: basic statistics, used only when @cpu is NULL
 * @running: true if @b represents a running qdisc, thus @b's internal
 *           values might change during basic reads.  Only used if @cpu
 *           is NULL
 *
 * Appends the basic statistics (TCA_STATS_BASIC) to the top level TLV
 * created by gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(struct gnet_dump *d,
		      struct gnet_stats_basic_sync __percpu *cpu,
		      struct gnet_stats_basic_sync *b,
		      bool running)
{
	return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu, may be NULL
 * @b: basic statistics, used only when @cpu is NULL
 * @running: true if @b represents a running qdisc, thus @b's internal
 *           values might change during basic reads.  Only used if @cpu
 *           is NULL
 *
 * Appends the hardware statistics (TCA_STATS_BASIC_HW) to the top level
 * TLV created by gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b,
			 bool running)
{
	return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().  The legacy TCA_STATS_RATE_EST attribute uses
 * 32-bit fields; when the sampled byte rate exceeds UINT_MAX, an extra
 * TCA_STATS_RATE_EST64 attribute carrying the full 64-bit sample is
 * appended as well.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	/* No estimator attached: nothing to dump. */
	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	/* Clamp the byte rate into the 32-bit legacy field. */
	est.bps = min_t(u64, UINT_MAX, sample.bps);

	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		/* est.bps != sample.bps means the clamp truncated the rate;
		 * emit the 64-bit attribute too.
		 */
		if (res < 0 || est.bps == sample.bps)
			return res;

		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
0339
0340 static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
0341 const struct gnet_stats_queue __percpu *q)
0342 {
0343 int i;
0344
0345 for_each_possible_cpu(i) {
0346 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
0347
0348 qstats->qlen += qcpu->qlen;
0349 qstats->backlog += qcpu->backlog;
0350 qstats->drops += qcpu->drops;
0351 qstats->requeues += qcpu->requeues;
0352 qstats->overlimits += qcpu->overlimits;
0353 }
0354 }
0355
0356 void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
0357 const struct gnet_stats_queue __percpu *cpu,
0358 const struct gnet_stats_queue *q)
0359 {
0360 if (cpu) {
0361 gnet_stats_add_queue_cpu(qstats, cpu);
0362 } else {
0363 qstats->qlen += q->qlen;
0364 qstats->backlog += q->backlog;
0365 qstats->drops += q->drops;
0366 qstats->requeues += q->requeues;
0367 qstats->overlimits += q->overlimits;
0368 }
0369 }
0370 EXPORT_SYMBOL(gnet_stats_add_queue);
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics, may be NULL
 * @q: queue statistics, used only when @cpu_q is NULL
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(), using the per cpu queue statistics if they
 * are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	gnet_stats_add_queue(&qstats, cpu_q, q);
	/* The caller-supplied qlen overrides whatever was accumulated. */
	qstats.qlen = qlen;

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created
 * by gnet_stats_start_copy() and remembers the data for XSTATS if the
 * dumping handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		/* Keep a private copy so gnet_stats_finish_copy() can emit
		 * the compat xstats TLV later; it is freed there (or on any
		 * gnet_stats_copy() failure).
		 */
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	/* kmemdup() failed above, so d->xstats is NULL here; only the lock
	 * needs releasing.
	 */
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls.  Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used, then releases the
 * statistics lock and frees the saved xstats copy.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	/* Patch the container TLV's length now that all nested TLVs are in. */
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	/* On failure gnet_stats_copy() already unlocked and freed xstats. */
	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);