Back to home page

LXR

 
 

    


0001 /*
0002  * Fast batching percpu counters.
0003  */
0004 
0005 #include <linux/percpu_counter.h>
0006 #include <linux/notifier.h>
0007 #include <linux/mutex.h>
0008 #include <linux/init.h>
0009 #include <linux/cpu.h>
0010 #include <linux/module.h>
0011 #include <linux/debugobjects.h>
0012 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * All live percpu_counters.  percpu_counter_cpu_dead() walks this list to
 * fold a dead CPU's per-cpu slot back into each counter's central count.
 * percpu_counters_lock protects the list, not the counters themselves.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
0017 
0018 #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
0019 
0020 static struct debug_obj_descr percpu_counter_debug_descr;
0021 
0022 static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
0023 {
0024     struct percpu_counter *fbc = addr;
0025 
0026     switch (state) {
0027     case ODEBUG_STATE_ACTIVE:
0028         percpu_counter_destroy(fbc);
0029         debug_object_free(fbc, &percpu_counter_debug_descr);
0030         return true;
0031     default:
0032         return false;
0033     }
0034 }
0035 
/* debugobjects descriptor: only freeing an active counter needs fixup. */
static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};
0040 
/* Register @fbc with debugobjects and mark it active (init path). */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}
0046 
/* Deactivate and drop debugobjects tracking for @fbc (destroy path). */
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}
0052 
0053 #else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
/* No-op stubs when CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER is disabled. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
0058 #endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
0059 
0060 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
0061 {
0062     int cpu;
0063     unsigned long flags;
0064 
0065     raw_spin_lock_irqsave(&fbc->lock, flags);
0066     for_each_possible_cpu(cpu) {
0067         s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
0068         *pcount = 0;
0069     }
0070     fbc->count = amount;
0071     raw_spin_unlock_irqrestore(&fbc->lock, flags);
0072 }
0073 EXPORT_SYMBOL(percpu_counter_set);
0074 
0075 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
0076 {
0077     s64 count;
0078 
0079     preempt_disable();
0080     count = __this_cpu_read(*fbc->counters) + amount;
0081     if (count >= batch || count <= -batch) {
0082         unsigned long flags;
0083         raw_spin_lock_irqsave(&fbc->lock, flags);
0084         fbc->count += count;
0085         __this_cpu_sub(*fbc->counters, count - amount);
0086         raw_spin_unlock_irqrestore(&fbc->lock, flags);
0087     } else {
0088         this_cpu_add(*fbc->counters, amount);
0089     }
0090     preempt_enable();
0091 }
0092 EXPORT_SYMBOL(__percpu_counter_add);
0093 
0094 /*
0095  * Add up all the per-cpu counts, return the result.  This is a more accurate
0096  * but much slower version of percpu_counter_read_positive()
0097  */
0098 s64 __percpu_counter_sum(struct percpu_counter *fbc)
0099 {
0100     s64 ret;
0101     int cpu;
0102     unsigned long flags;
0103 
0104     raw_spin_lock_irqsave(&fbc->lock, flags);
0105     ret = fbc->count;
0106     for_each_online_cpu(cpu) {
0107         s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
0108         ret += *pcount;
0109     }
0110     raw_spin_unlock_irqrestore(&fbc->lock, flags);
0111     return ret;
0112 }
0113 EXPORT_SYMBOL(__percpu_counter_sum);
0114 
/*
 * Initialize @fbc to @amount.  The per-cpu slots are allocated with @gfp;
 * @key sets the lockdep class of fbc->lock.  On CONFIG_HOTPLUG_CPU the
 * counter is linked onto the global list so a dying CPU's slot gets folded.
 * Returns 0 on success, -ENOMEM if the per-cpu allocation fails (in which
 * case fbc->counters stays NULL and percpu_counter_destroy() is a no-op).
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	/* Track with debugobjects only once the counter is fully usable. */
	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
0138 
/*
 * Tear down @fbc: drop debugobjects tracking, unlink from the hotplug
 * list, and free the per-cpu slots.  Safe to call on a counter whose
 * init failed (counters == NULL) and idempotent, since counters is
 * NULLed at the end.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	/* Unlink before freeing so the CPU-dead callback can't see stale slots. */
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
0157 
/*
 * Default fold threshold for percpu counters; recomputed on CPU hotplug
 * by compute_batch_value() as max(32, 2 * num_online_cpus()).
 */
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
0160 
0161 static int compute_batch_value(unsigned int cpu)
0162 {
0163     int nr = num_online_cpus();
0164 
0165     percpu_counter_batch = max(32, nr*2);
0166     return 0;
0167 }
0168 
/*
 * CPU-dead hotplug callback: shrink the batch for the smaller machine,
 * then fold the dead CPU's slot of every registered counter into its
 * central count so the value isn't stranded.  Lock order: the list lock
 * (irq-off) outside, each counter's raw lock inside.
 */
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
0191 
0192 /*
0193  * Compare counter against given value.
0194  * Return 1 if greater, 0 if equal and -1 if less
0195  */
0196 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
0197 {
0198     s64 count;
0199 
0200     count = percpu_counter_read(fbc);
0201     /* Check to see if rough count will be sufficient for comparison */
0202     if (abs(count - rhs) > (batch * num_online_cpus())) {
0203         if (count > rhs)
0204             return 1;
0205         else
0206             return -1;
0207     }
0208     /* Need to use precise count */
0209     count = percpu_counter_sum(fbc);
0210     if (count > rhs)
0211         return 1;
0212     else if (count < rhs)
0213         return -1;
0214     else
0215         return 0;
0216 }
0217 EXPORT_SYMBOL(__percpu_counter_compare);
0218 
/*
 * Boot-time registration of the two hotplug hooks: the online callback
 * recomputes the batch (and runs immediately for already-online CPUs via
 * cpuhp_setup_state), while the dead callback is registered with
 * _nocalls since there is nothing to fold at boot.  Failures are
 * warned about but not fatal.
 */
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);