Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * net/core/gen_stats.c
0004  *
0005  * Authors:  Thomas Graf <tgraf@suug.ch>
0006  *           Jamal Hadi Salim
0007  *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
0008  *
0009  * See Documentation/networking/gen_stats.rst
0010  */
0011 
0012 #include <linux/types.h>
0013 #include <linux/kernel.h>
0014 #include <linux/module.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/socket.h>
0017 #include <linux/rtnetlink.h>
0018 #include <linux/gen_stats.h>
0019 #include <net/netlink.h>
0020 #include <net/gen_stats.h>
0021 #include <net/sch_generic.h>
0022 
0023 static inline int
0024 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
0025 {
0026     if (nla_put_64bit(d->skb, type, size, buf, padattr))
0027         goto nla_put_failure;
0028     return 0;
0029 
0030 nla_put_failure:
0031     if (d->lock)
0032         spin_unlock_bh(d->lock);
0033     kfree(d->xstats);
0034     d->xstats = NULL;
0035     d->xstats_len = 0;
0036     return -1;
0037 }
0038 
/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVS.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	/* Remember where the container TLV will land; a zero @type means the
	 * caller does not want a top-level container TLV at all.
	 */
	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
0094 
/**
 * gnet_stats_start_copy - start dumping procedure in non-compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVS.
 *
 * Thin wrapper around gnet_stats_start_copy_compat() with both backward
 * compatibility TLV types set to 0, i.e. compatibility mode disabled.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
0116 
0117 /* Must not be inlined, due to u64_stats seqcount_t lockdep key */
0118 void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
0119 {
0120     u64_stats_set(&b->bytes, 0);
0121     u64_stats_set(&b->packets, 0);
0122     u64_stats_init(&b->syncp);
0123 }
0124 EXPORT_SYMBOL(gnet_stats_basic_sync_init);
0125 
0126 static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
0127                      struct gnet_stats_basic_sync __percpu *cpu)
0128 {
0129     u64 t_bytes = 0, t_packets = 0;
0130     int i;
0131 
0132     for_each_possible_cpu(i) {
0133         struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
0134         unsigned int start;
0135         u64 bytes, packets;
0136 
0137         do {
0138             start = u64_stats_fetch_begin_irq(&bcpu->syncp);
0139             bytes = u64_stats_read(&bcpu->bytes);
0140             packets = u64_stats_read(&bcpu->packets);
0141         } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
0142 
0143         t_bytes += bytes;
0144         t_packets += packets;
0145     }
0146     _bstats_update(bstats, t_bytes, t_packets);
0147 }
0148 
0149 void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
0150               struct gnet_stats_basic_sync __percpu *cpu,
0151               struct gnet_stats_basic_sync *b, bool running)
0152 {
0153     unsigned int start;
0154     u64 bytes = 0;
0155     u64 packets = 0;
0156 
0157     WARN_ON_ONCE((cpu || running) && in_hardirq());
0158 
0159     if (cpu) {
0160         gnet_stats_add_basic_cpu(bstats, cpu);
0161         return;
0162     }
0163     do {
0164         if (running)
0165             start = u64_stats_fetch_begin_irq(&b->syncp);
0166         bytes = u64_stats_read(&b->bytes);
0167         packets = u64_stats_read(&b->packets);
0168     } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
0169 
0170     _bstats_update(bstats, bytes, packets);
0171 }
0172 EXPORT_SYMBOL(gnet_stats_add_basic);
0173 
0174 static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
0175                   struct gnet_stats_basic_sync __percpu *cpu,
0176                   struct gnet_stats_basic_sync *b, bool running)
0177 {
0178     unsigned int start;
0179 
0180     if (cpu) {
0181         u64 t_bytes = 0, t_packets = 0;
0182         int i;
0183 
0184         for_each_possible_cpu(i) {
0185             struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
0186             unsigned int start;
0187             u64 bytes, packets;
0188 
0189             do {
0190                 start = u64_stats_fetch_begin_irq(&bcpu->syncp);
0191                 bytes = u64_stats_read(&bcpu->bytes);
0192                 packets = u64_stats_read(&bcpu->packets);
0193             } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
0194 
0195             t_bytes += bytes;
0196             t_packets += packets;
0197         }
0198         *ret_bytes = t_bytes;
0199         *ret_packets = t_packets;
0200         return;
0201     }
0202     do {
0203         if (running)
0204             start = u64_stats_fetch_begin_irq(&b->syncp);
0205         *ret_bytes = u64_stats_read(&b->bytes);
0206         *ret_packets = u64_stats_read(&b->packets);
0207     } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
0208 }
0209 
0210 static int
0211 ___gnet_stats_copy_basic(struct gnet_dump *d,
0212              struct gnet_stats_basic_sync __percpu *cpu,
0213              struct gnet_stats_basic_sync *b,
0214              int type, bool running)
0215 {
0216     u64 bstats_bytes, bstats_packets;
0217 
0218     gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);
0219 
0220     if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
0221         d->tc_stats.bytes = bstats_bytes;
0222         d->tc_stats.packets = bstats_packets;
0223     }
0224 
0225     if (d->tail) {
0226         struct gnet_stats_basic sb;
0227         int res;
0228 
0229         memset(&sb, 0, sizeof(sb));
0230         sb.bytes = bstats_bytes;
0231         sb.packets = bstats_packets;
0232         res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
0233         if (res < 0 || sb.packets == bstats_packets)
0234             return res;
0235         /* emit 64bit stats only if needed */
0236         return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
0237                        sizeof(bstats_packets), TCA_STATS_PAD);
0238     }
0239     return 0;
0240 }
0241 
0242 /**
0243  * gnet_stats_copy_basic - copy basic statistics into statistic TLV
0244  * @d: dumping handle
0245  * @cpu: copy statistic per cpu
0246  * @b: basic statistics
0247  * @running: true if @b represents a running qdisc, thus @b's
0248  *           internal values might change during basic reads.
0249  *           Only used if @cpu is NULL
0250  *
0251  * Context: task; must not be run from IRQ or BH contexts
0252  *
0253  * Appends the basic statistics to the top level TLV created by
0254  * gnet_stats_start_copy().
0255  *
0256  * Returns 0 on success or -1 with the statistic lock released
0257  * if the room in the socket buffer was not sufficient.
0258  */
0259 int
0260 gnet_stats_copy_basic(struct gnet_dump *d,
0261               struct gnet_stats_basic_sync __percpu *cpu,
0262               struct gnet_stats_basic_sync *b,
0263               bool running)
0264 {
0265     return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
0266 }
0267 EXPORT_SYMBOL(gnet_stats_copy_basic);
0268 
0269 /**
0270  * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
0271  * @d: dumping handle
0272  * @cpu: copy statistic per cpu
0273  * @b: basic statistics
0274  * @running: true if @b represents a running qdisc, thus @b's
0275  *           internal values might change during basic reads.
0276  *           Only used if @cpu is NULL
0277  *
0278  * Context: task; must not be run from IRQ or BH contexts
0279  *
0280  * Appends the basic statistics to the top level TLV created by
0281  * gnet_stats_start_copy().
0282  *
0283  * Returns 0 on success or -1 with the statistic lock released
0284  * if the room in the socket buffer was not sufficient.
0285  */
0286 int
0287 gnet_stats_copy_basic_hw(struct gnet_dump *d,
0288              struct gnet_stats_basic_sync __percpu *cpu,
0289              struct gnet_stats_basic_sync *b,
0290              bool running)
0291 {
0292     return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
0293 }
0294 EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
0295 
0296 /**
0297  * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
0298  * @d: dumping handle
0299  * @rate_est: rate estimator
0300  *
0301  * Appends the rate estimator statistics to the top level TLV created by
0302  * gnet_stats_start_copy().
0303  *
0304  * Returns 0 on success or -1 with the statistic lock released
0305  * if the room in the socket buffer was not sufficient.
0306  */
0307 int
0308 gnet_stats_copy_rate_est(struct gnet_dump *d,
0309              struct net_rate_estimator __rcu **rate_est)
0310 {
0311     struct gnet_stats_rate_est64 sample;
0312     struct gnet_stats_rate_est est;
0313     int res;
0314 
0315     if (!gen_estimator_read(rate_est, &sample))
0316         return 0;
0317     est.bps = min_t(u64, UINT_MAX, sample.bps);
0318     /* we have some time before reaching 2^32 packets per second */
0319     est.pps = sample.pps;
0320 
0321     if (d->compat_tc_stats) {
0322         d->tc_stats.bps = est.bps;
0323         d->tc_stats.pps = est.pps;
0324     }
0325 
0326     if (d->tail) {
0327         res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
0328                       TCA_STATS_PAD);
0329         if (res < 0 || est.bps == sample.bps)
0330             return res;
0331         /* emit 64bit stats only if needed */
0332         return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
0333                        sizeof(sample), TCA_STATS_PAD);
0334     }
0335 
0336     return 0;
0337 }
0338 EXPORT_SYMBOL(gnet_stats_copy_rate_est);
0339 
0340 static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
0341                      const struct gnet_stats_queue __percpu *q)
0342 {
0343     int i;
0344 
0345     for_each_possible_cpu(i) {
0346         const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
0347 
0348         qstats->qlen += qcpu->qlen;
0349         qstats->backlog += qcpu->backlog;
0350         qstats->drops += qcpu->drops;
0351         qstats->requeues += qcpu->requeues;
0352         qstats->overlimits += qcpu->overlimits;
0353     }
0354 }
0355 
0356 void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
0357               const struct gnet_stats_queue __percpu *cpu,
0358               const struct gnet_stats_queue *q)
0359 {
0360     if (cpu) {
0361         gnet_stats_add_queue_cpu(qstats, cpu);
0362     } else {
0363         qstats->qlen += q->qlen;
0364         qstats->backlog += q->backlog;
0365         qstats->drops += q->drops;
0366         qstats->requeues += q->requeues;
0367         qstats->overlimits += q->overlimits;
0368     }
0369 }
0370 EXPORT_SYMBOL(gnet_stats_add_queue);
0371 
0372 /**
0373  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
0374  * @d: dumping handle
0375  * @cpu_q: per cpu queue statistics
0376  * @q: queue statistics
0377  * @qlen: queue length statistics
0378  *
0379  * Appends the queue statistics to the top level TLV created by
0380  * gnet_stats_start_copy(). Using per cpu queue statistics if
0381  * they are available.
0382  *
0383  * Returns 0 on success or -1 with the statistic lock released
0384  * if the room in the socket buffer was not sufficient.
0385  */
0386 int
0387 gnet_stats_copy_queue(struct gnet_dump *d,
0388               struct gnet_stats_queue __percpu *cpu_q,
0389               struct gnet_stats_queue *q, __u32 qlen)
0390 {
0391     struct gnet_stats_queue qstats = {0};
0392 
0393     gnet_stats_add_queue(&qstats, cpu_q, q);
0394     qstats.qlen = qlen;
0395 
0396     if (d->compat_tc_stats) {
0397         d->tc_stats.drops = qstats.drops;
0398         d->tc_stats.qlen = qstats.qlen;
0399         d->tc_stats.backlog = qstats.backlog;
0400         d->tc_stats.overlimits = qstats.overlimits;
0401     }
0402 
0403     if (d->tail)
0404         return gnet_stats_copy(d, TCA_STATS_QUEUE,
0405                        &qstats, sizeof(qstats),
0406                        TCA_STATS_PAD);
0407 
0408     return 0;
0409 }
0410 EXPORT_SYMBOL(gnet_stats_copy_queue);
0411 
0412 /**
0413  * gnet_stats_copy_app - copy application specific statistics into statistics TLV
0414  * @d: dumping handle
0415  * @st: application specific statistics data
0416  * @len: length of data
0417  *
0418  * Appends the application specific statistics to the top level TLV created by
0419  * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
0420  * handle is in backward compatibility mode.
0421  *
0422  * Returns 0 on success or -1 with the statistic lock released
0423  * if the room in the socket buffer was not sufficient.
0424  */
0425 int
0426 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
0427 {
0428     if (d->compat_xstats) {
0429         d->xstats = kmemdup(st, len, GFP_ATOMIC);
0430         if (!d->xstats)
0431             goto err_out;
0432         d->xstats_len = len;
0433     }
0434 
0435     if (d->tail)
0436         return gnet_stats_copy(d, TCA_STATS_APP, st, len,
0437                        TCA_STATS_PAD);
0438 
0439     return 0;
0440 
0441 err_out:
0442     if (d->lock)
0443         spin_unlock_bh(d->lock);
0444     d->xstats_len = 0;
0445     return -1;
0446 }
0447 EXPORT_SYMBOL(gnet_stats_copy_app);
0448 
0449 /**
0450  * gnet_stats_finish_copy - finish dumping procedure
0451  * @d: dumping handle
0452  *
0453  * Corrects the length of the top level TLV to include all TLVs added
0454  * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
0455  * if gnet_stats_start_copy_compat() was used and releases the statistics
0456  * lock.
0457  *
0458  * Returns 0 on success or -1 with the statistic lock released
0459  * if the room in the socket buffer was not sufficient.
0460  */
0461 int
0462 gnet_stats_finish_copy(struct gnet_dump *d)
0463 {
0464     if (d->tail)
0465         d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;
0466 
0467     if (d->compat_tc_stats)
0468         if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
0469                     sizeof(d->tc_stats), d->padattr) < 0)
0470             return -1;
0471 
0472     if (d->compat_xstats && d->xstats) {
0473         if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
0474                     d->xstats_len, d->padattr) < 0)
0475             return -1;
0476     }
0477 
0478     if (d->lock)
0479         spin_unlock_bh(d->lock);
0480     kfree(d->xstats);
0481     d->xstats = NULL;
0482     d->xstats_len = 0;
0483     return 0;
0484 }
0485 EXPORT_SYMBOL(gnet_stats_finish_copy);