/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * -  On 32-bit, a seqcount_t is used to detect torn reads.
 * -  On 64-bit architectures the whole mechanism compiles away to a no-op.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt
 *    the writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a
 *    reader, can be invoked from an IRQ context. On 64-bit systems this
 *    variant is not required.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent w.r.t. each other: the seqcount only guards each
 *    individual 64-bit value against tearing.
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pseudo-reads to allow multiple writers.
 *
 * 6) Readers must use the u64_stats_fetch_{begin,retry}() pair even on UP.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	seqcount_t	seq;
#endif
};
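
/*
 * A stats structure embeds one u64_stats_sync next to the counters it
 * protects. A minimal sketch (the 'mystats' type and its field names are
 * illustrative, not part of this API):
 *
 *	struct mystats {
 *		u64_stats_t		bytes;
 *		u64_stats_t		packets;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 * One instance per CPU (e.g. alloc_percpu(struct mystats)) is the common
 * pattern, so that writers on different CPUs never contend.
 */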

#if BITS_PER_LONG == 64
#include <asm/local64.h>

typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	local64_set(&p->v, val);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	p->v = val;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif
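
/*
 * Note on the two representations above: on 64-bit, loads and stores of a
 * u64 are single untearable accesses, so the local64_t accessors suffice
 * and u64_stats_sync is empty. On 32-bit, the accessors are plain
 * non-atomic operations; they are only safe because, on SMP and
 * PREEMPT_RT, writers wrap them in u64_stats_update_begin()/_end() and
 * readers retry via the seqcount, while on UP the fetch helpers disable
 * preemption (or IRQs) instead.
 */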

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
}
#endif

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
#endif
}
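
/*
 * Writer-side sketch, reusing the illustrative 'struct mystats' above and
 * assuming the caller already runs with preemption disabled and mutual
 * exclusion among writers (e.g. per-CPU data updated from BH context);
 * 'mystats_pcpu' and 'len' are placeholders:
 *
 *	struct mystats *stats = this_cpu_ptr(mystats_pcpu);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_add(&stats->bytes, len);	// plain op, seqcount-guarded
 *	u64_stats_inc(&stats->packets);		// plain op, seqcount-guarded
 *	u64_stats_update_end(&stats->syncp);
 */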

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		local_irq_restore(flags);
#endif
}
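
/*
 * When a counter can also be updated from hardirq context (constraint #3
 * above), process-context writers must block IRQs around the update. A
 * sketch, again using the illustrative 'mystats':
 *
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *	u64_stats_add(&stats->bytes, len);
 *	u64_stats_update_end_irqrestore(&stats->syncp, flags);
 *
 * On 64-bit, and on 32-bit PREEMPT_RT, 'flags' stays 0 and no IRQ state
 * is actually touched.
 */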
0181
0182 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
0183 {
0184 #if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
0185 return read_seqcount_begin(&syncp->seq);
0186 #else
0187 return 0;
0188 #endif
0189 }
0190
0191 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
0192 {
0193 #if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
0194 preempt_disable();
0195 #endif
0196 return __u64_stats_fetch_begin(syncp);
0197 }
0198
0199 static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
0200 unsigned int start)
0201 {
0202 #if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
0203 return read_seqcount_retry(&syncp->seq, start);
0204 #else
0205 return false;
0206 #endif
0207 }
0208
0209 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
0210 unsigned int start)
0211 {
0212 #if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
0213 preempt_enable();
0214 #endif
0215 return __u64_stats_fetch_retry(syncp, start);
0216 }
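
/*
 * Reader-side sketch: retry until a tear-free snapshot is taken. Each
 * value is individually untorn, but the set is not guaranteed to be
 * mutually consistent (constraint #4). Again using the illustrative
 * 'mystats':
 *
 *	u64 bytes, packets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		bytes = u64_stats_read(&stats->bytes);
 *		packets = u64_stats_read(&stats->packets);
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */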

/*
 * If IRQ handlers can update the u64 counters, readers must use the
 * following helpers instead:
 * - 32-bit SMP arches rely on the seqcount, which is IRQ safe.
 * - 32-bit UP must disable IRQs around the fetch.
 * - 64-bit arches read u64 values atomically, so this is IRQ safe anyway.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
	preempt_disable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
	preempt_enable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
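
/*
 * The _irq reader pair is used exactly like the plain one; only the
 * begin/retry calls change. Illustrative only:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		bytes = u64_stats_read(&stats->bytes);
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */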

#endif /* _LINUX_U64_STATS_SYNC_H */