/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * -  Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
 * -  The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent w.r.t. each other (remember point #2: seqcounts
 *    are not used on 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
 *    might be updated from a hardirq or softirq context (remember point #1:
 *    seqcounts are not used for UP kernels). 32-bit UP stat readers could read
 *    corrupted 64-bit values otherwise.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-CPU data is used in a non-preemptible context):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len); // non atomic operation
 *   u64_stats_inc(&stats->packets64);    // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but with no consistency guarantee across them):
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
 *         tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * See drivers/net/loopback.c for an example of use with per-CPU containers,
 * in a BH-disabled context. (Illustrative sketches also follow the
 * corresponding helpers below.)
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    seqcount_t  seq;
#endif
};

#if BITS_PER_LONG == 64
#include <asm/local64.h>

typedef struct {
    local64_t   v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
    return local64_read(&p->v);
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
    local64_set(&p->v, val);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
    local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
    local64_inc(&p->v);
}

#else

typedef struct {
    u64     v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
    return p->v;
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
    p->v = val;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
    p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
    p->v++;
}
#endif

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
#define u64_stats_init(syncp)   seqcount_init(&(syncp)->seq)
#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
}
#endif
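
/*
 * Illustrative sketch only (not part of this header's API): a minimal counter
 * structure embedding a u64_stats_sync, initialized before first use.  The
 * names "struct ex_stats" and ex_stats_setup() are hypothetical, chosen just
 * for this example.
 */
struct ex_stats {
    u64_stats_t             rx_bytes;
    u64_stats_t             rx_packets;
    struct u64_stats_sync   syncp;
};

static inline void ex_stats_setup(struct ex_stats *s)
{
    u64_stats_set(&s->rx_bytes, 0);
    u64_stats_set(&s->rx_packets, 0);
    u64_stats_init(&s->syncp);  /* only initializes a seqcount on 32-bit SMP/PREEMPT_RT */
}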

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
        preempt_disable();
    write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    write_seqcount_end(&syncp->seq);
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
        preempt_enable();
#endif
}
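
/*
 * Illustrative writer-side sketch, assuming the caller already has exclusive
 * access to the counters (e.g. per-CPU data updated in a BH-disabled
 * context), as required by constraint #1 above.  ex_stats_rx() and its
 * parameters are hypothetical names used only for this example.
 */
static inline void ex_stats_rx(struct u64_stats_sync *syncp,
                   u64_stats_t *bytes, u64_stats_t *packets,
                   unsigned int len)
{
    u64_stats_update_begin(syncp);
    u64_stats_add(bytes, len);  /* plain, non-atomic update */
    u64_stats_inc(packets);     /* plain, non-atomic update */
    u64_stats_update_end(syncp);
}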

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
    unsigned long flags = 0;

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
        preempt_disable();
    else
        local_irq_save(flags);
    write_seqcount_begin(&syncp->seq);
#endif
    return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
                unsigned long flags)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    write_seqcount_end(&syncp->seq);
    if (IS_ENABLED(CONFIG_PREEMPT_RT))
        preempt_enable();
    else
        local_irq_restore(flags);
#endif
}
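
/*
 * Illustrative sketch for updates that may also happen from IRQ context
 * (constraint #3 above): on 32-bit, local interrupts (or just preemption
 * under PREEMPT_RT) are disabled across the seqcount update.
 * ex_stats_add_any_context() is a hypothetical name.
 */
static inline void ex_stats_add_any_context(struct u64_stats_sync *syncp,
                        u64_stats_t *counter, unsigned long val)
{
    unsigned long flags;

    flags = u64_stats_update_begin_irqsave(syncp);
    u64_stats_add(counter, val);
    u64_stats_update_end_irqrestore(syncp, flags);
}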

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    return read_seqcount_begin(&syncp->seq);
#else
    return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
    preempt_disable();
#endif
    return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                     unsigned int start)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
    return read_seqcount_retry(&syncp->seq, start);
#else
    return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                     unsigned int start)
{
#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
    preempt_enable();
#endif
    return __u64_stats_fetch_retry(syncp, start);
}
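
/*
 * Illustrative reader-side sketch: retry the snapshot until no writer raced
 * with it.  Each counter is individually torn-free, but there is no
 * consistency guarantee across the two counters (constraint #4 above).
 * ex_stats_snapshot() is a hypothetical name.
 */
static inline void ex_stats_snapshot(const struct u64_stats_sync *syncp,
                     const u64_stats_t *bytes, const u64_stats_t *packets,
                     u64 *tbytes, u64 *tpackets)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin(syncp);
        *tbytes = u64_stats_read(bytes);
        *tpackets = u64_stats_read(packets);
    } while (u64_stats_fetch_retry(syncp, start));
}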

/*
 * In case IRQ handlers can update the u64 counters, readers can use the
 * following helpers:
 * - 32-bit SMP arches use seqcount protection, which is IRQ safe.
 * - 32-bit UP must disable IRQs.
 * - 64-bit arches can read u64 values atomically, which is IRQ safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
    preempt_disable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
    local_irq_disable();
#endif
    return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
                         unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
    preempt_enable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
    local_irq_enable();
#endif
    return __u64_stats_fetch_retry(syncp, start);
}
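
/*
 * Illustrative sketch for readers when the counters may be updated from
 * hardirq or softirq context (constraint #6 above): the _irq variants keep a
 * 32-bit UP reader from observing a torn update.  ex_stats_read_irq() is a
 * hypothetical name.
 */
static inline u64 ex_stats_read_irq(const struct u64_stats_sync *syncp,
                    const u64_stats_t *counter)
{
    unsigned int start;
    u64 val;

    do {
        start = u64_stats_fetch_begin_irq(syncp);
        val = u64_stats_read(counter);
    } while (u64_stats_fetch_retry_irq(syncp, start));

    return val;
}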

#endif /* _LINUX_U64_STATS_SYNC_H */