Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_PERCPU_COUNTER_H
0003 #define _LINUX_PERCPU_COUNTER_H
0004 /*
0005  * A simple "approximate counter" for use in ext2 and ext3 superblocks.
0006  *
0007  * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
0008  */
0009 
0010 #include <linux/spinlock.h>
0011 #include <linux/smp.h>
0012 #include <linux/list.h>
0013 #include <linux/threads.h>
0014 #include <linux/percpu.h>
0015 #include <linux/types.h>
0016 #include <linux/gfp.h>
0017 
0018 #ifdef CONFIG_SMP
0019 
/*
 * SMP flavour: a cheap, racy global total in @count plus small per-CPU
 * deltas in @counters.  NOTE(review): @lock presumably serialises folding
 * the per-CPU deltas back into @count — implementation lives outside this
 * header (lib/percpu_counter.c); confirm there.
 */
struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;		/* approximate global value; see percpu_counter_read() */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas not yet folded into @count */
};
0028 
0029 extern int percpu_counter_batch;
0030 
0031 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
0032               struct lock_class_key *key);
0033 
/*
 * Initialise @fbc to @value, allocating per-CPU storage with @gfp.
 * The static __key gives each *call site* its own lockdep class, so
 * distinct counters initialised from different places do not share a
 * lock class (avoids false-positive lockdep reports).  Must stay a
 * macro for exactly that reason — a function would collapse all call
 * sites onto one key.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
0040 
0041 void percpu_counter_destroy(struct percpu_counter *fbc);
0042 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
0043 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
0044                   s32 batch);
0045 s64 __percpu_counter_sum(struct percpu_counter *fbc);
0046 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
0047 void percpu_counter_sync(struct percpu_counter *fbc);
0048 
0049 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
0050 {
0051     return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
0052 }
0053 
0054 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
0055 {
0056     percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
0057 }
0058 
0059 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
0060 {
0061     s64 ret = __percpu_counter_sum(fbc);
0062     return ret < 0 ? 0 : ret;
0063 }
0064 
0065 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
0066 {
0067     return __percpu_counter_sum(fbc);
0068 }
0069 
0070 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
0071 {
0072     return fbc->count;
0073 }
0074 
0075 /*
0076  * It is possible for the percpu_counter_read() to return a small negative
0077  * number for some counter which should never be negative.
0078  *
0079  */
0080 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
0081 {
0082     /* Prevent reloads of fbc->count */
0083     s64 ret = READ_ONCE(fbc->count);
0084 
0085     if (ret >= 0)
0086         return ret;
0087     return 0;
0088 }
0089 
0090 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
0091 {
0092     return (fbc->counters != NULL);
0093 }
0094 
0095 #else /* !CONFIG_SMP */
0096 
/* UP flavour: a single exact counter — no lock, no per-CPU state. */
struct percpu_counter {
	s64 count;
};
0100 
/*
 * UP flavour: no per-CPU storage to allocate, so initialisation is a
 * plain store.  @gfp exists only for interface parity with the SMP
 * version and is unused; the call cannot fail, hence the unconditional
 * 0 return.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
0107 
/* Nothing was allocated by the UP percpu_counter_init(), so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
0111 
/* Set the counter to @amount; on UP this is just a direct store. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
0116 
0117 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
0118 {
0119     if (fbc->count > rhs)
0120         return 1;
0121     else if (fbc->count < rhs)
0122         return -1;
0123     else
0124         return 0;
0125 }
0126 
0127 static inline int
0128 __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
0129 {
0130     return percpu_counter_compare(fbc, rhs);
0131 }
0132 
/*
 * Add @amount to the counter.  The preempt_disable()/preempt_enable()
 * pair keeps the non-atomic 64-bit read-modify-write of @count from
 * being interleaved with a preempting updater; the update must stay
 * inside that window.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
0140 
0141 static inline void
0142 percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
0143 {
0144     percpu_counter_add(fbc, amount);
0145 }
0146 
0147 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
0148 {
0149     return fbc->count;
0150 }
0151 
0152 /*
0153  * percpu_counter is intended to track positive numbers. In the UP case the
0154  * number should never be negative.
0155  */
0156 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
0157 {
0158     return fbc->count;
0159 }
0160 
0161 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
0162 {
0163     return percpu_counter_read_positive(fbc);
0164 }
0165 
0166 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
0167 {
0168     return percpu_counter_read(fbc);
0169 }
0170 
/*
 * UP counters need no allocation, so they are usable from the moment the
 * struct exists — always report initialized.
 */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
0175 
/* No per-CPU deltas exist on UP, so there is nothing to flush. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
0179 #endif  /* CONFIG_SMP */
0180 
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
0185 
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
0190 
/*
 * Subtract @amount, implemented by adding its negation.  NOTE(review):
 * amount == S64_MIN cannot be negated without signed overflow; callers
 * are presumed never to pass it.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
0195 
0196 #endif /* _LINUX_PERCPU_COUNTER_H */