#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu refcount starts out as just a set of percpu counters.  In this
 * initial mode we don't try to detect the ref hitting 0 - which means that
 * get/put can just increment or decrement the local counter.  Note that the
 * counter on a particular cpu can (and will) wrap - this is fine; when we go
 * to shutdown, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown.  We can't
 * detect the ref hitting 0 on every put - this would require global
 * synchronization and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill.  Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
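
/*
 * A scaled-down illustration of the modular-arithmetic argument above,
 * using 8-bit counters in place of unsigned long.  Suppose cpu0 sees 300
 * gets and cpu1 sees 300 puts:
 *
 *   cpu0 counter:  300 mod 256 =  44    (wrapped once)
 *   cpu1 counter: -300 mod 256 = 212    (underflowed)
 *
 *   sum = 44 + 212 = 256 = 0 (mod 256)
 *
 * so the summed percpu contribution is 0, exactly as if all 600 operations
 * had been applied to a single integer.  The same argument holds for
 * BITS_PER_LONG-bit counters.
 */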

#define PERCPU_COUNT_BIAS   (1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
    return (unsigned long __percpu *)
        (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
            unsigned int flags, gfp_t gfp)
{
    size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                 __alignof__(unsigned long));
    unsigned long start_count = 0;

    ref->percpu_count_ptr = (unsigned long)
        __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
    if (!ref->percpu_count_ptr)
        return -ENOMEM;

    ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

    if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
    else
        start_count += PERCPU_COUNT_BIAS;

    if (flags & PERCPU_REF_INIT_DEAD)
        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
    else
        start_count++;

    atomic_long_set(&ref->count, start_count);

    ref->release = release;
    ref->confirm_switch = NULL;
    return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
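
/*
 * Sketch of typical percpu_ref_init() usage.  struct foo, foo_alloc() and
 * foo_release() are hypothetical, and kzalloc()/kfree() and the completions
 * (used by later sketches) assume <linux/slab.h> and <linux/completion.h>.
 * The ref is embedded in a longer-lived object whose @release callback
 * (which must not sleep) tears the object down once the count hits 0.
 */
struct foo {
    struct percpu_ref ref;
    struct completion frozen;           /* used by the freeze sketch below */
    struct completion killed;           /* used by the shutdown sketch below */
    /* ... other members ... */
};

static void foo_release(struct percpu_ref *ref)
{
    struct foo *foo = container_of(ref, struct foo, ref);

    percpu_ref_exit(&foo->ref);
    kfree(foo);
}

static struct foo *foo_alloc(gfp_t gfp)
{
    struct foo *foo = kzalloc(sizeof(*foo), gfp);

    if (!foo)
        return NULL;

    init_completion(&foo->frozen);
    init_completion(&foo->killed);

    /* flags == 0: start live, in percpu mode, with a refcount of 1 */
    if (percpu_ref_init(&foo->ref, foo_release, 0, gfp)) {
        kfree(foo);
        return NULL;
    }
    return foo;
}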

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
    unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

    if (percpu_count) {
        /* non-NULL confirm_switch indicates switching in progress */
        WARN_ON_ONCE(ref->confirm_switch);
        free_percpu(percpu_count);
        ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
    }
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
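
/*
 * Sketch of the init-failure path mentioned above, reusing the hypothetical
 * struct foo; foo_setup_queues() stands in for whatever later
 * initialization step might fail.
 */
static int foo_init(struct foo *foo, gfp_t gfp)
{
    int ret;

    ret = percpu_ref_init(&foo->ref, foo_release, 0, gfp);
    if (ret)
        return ret;

    ret = foo_setup_queues(foo);        /* hypothetical */
    if (ret) {
        /* @ref never became visible to users, safe to exit here */
        percpu_ref_exit(&foo->ref);
        return ret;
    }
    return 0;
}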

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
    struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

    ref->confirm_switch(ref);
    ref->confirm_switch = NULL;
    wake_up_all(&percpu_ref_switch_waitq);

    /* drop ref from percpu_ref_switch_to_atomic() */
    percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
    struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
    unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
    unsigned long count = 0;
    int cpu;

    for_each_possible_cpu(cpu)
        count += *per_cpu_ptr(percpu_count, cpu);

    pr_debug("global %ld percpu %ld",
         atomic_long_read(&ref->count), (long)count);

    /*
     * It's crucial that we sum the percpu counters _before_ adding the sum
     * to &ref->count; since gets could be happening on one cpu while puts
     * happen on another, adding a single cpu's count could cause
     * @ref->count to hit 0 before we've got a consistent value - but the
     * sum of all the counts will be consistent and correct.
     *
     * Subtracting the bias value then has to happen _after_ adding count to
     * &ref->count; we need the bias value to prevent &ref->count from
     * reaching 0 before we add the percpu counts.  But doing it at the same
     * time is equivalent and saves us atomic operations:
     */
    atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

    WARN_ONCE(atomic_long_read(&ref->count) <= 0,
          "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
          ref->release, atomic_long_read(&ref->count));

    /* @ref is viewed as dead on all CPUs, send out switch confirmation */
    percpu_ref_call_confirm_rcu(rcu);
}
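
/*
 * Worked illustration of the ordering argument above: in percpu mode the
 * atomic counter was initialized to PERCPU_COUNT_BIAS + 1.  Suppose the
 * percpu counters sum to 5 (five more gets than puts since init); the
 * single atomic_long_add() then leaves
 *
 *   count = (PERCPU_COUNT_BIAS + 1) + (5 - PERCPU_COUNT_BIAS) = 6
 *
 * i.e. the bias cancels, &ref->count holds the true value (the initial
 * ref plus five outstanding gets), and at no point in between could
 * &ref->count have been observed as 0.
 */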

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                      percpu_ref_func_t *confirm_switch)
{
    if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
        if (confirm_switch)
            confirm_switch(ref);
        return;
    }

    /* switching from percpu to atomic */
    ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

    /*
     * A non-NULL ->confirm_switch is used to indicate that switching is
     * in progress.  Use the noop one if unspecified.
     */
    ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

    percpu_ref_get(ref);    /* put after confirmation */
    call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
    unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
    int cpu;

    BUG_ON(!percpu_count);

    if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
        return;

    atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

    /*
     * Restore per-cpu operation.  smp_store_release() is paired with
     * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
     * that the zeroing is visible to all percpu accesses which can see
     * the following __PERCPU_REF_ATOMIC clearing.
     */
    for_each_possible_cpu(cpu)
        *per_cpu_ptr(percpu_count, cpu) = 0;

    smp_store_release(&ref->percpu_count_ptr,
              ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                     percpu_ref_func_t *confirm_switch)
{
    lockdep_assert_held(&percpu_ref_switch_lock);

    /*
     * If the previous ATOMIC switching hasn't finished yet, wait for
     * its completion.  If the caller ensures that ATOMIC switching
     * isn't in progress, this function can be called from any context.
     */
    wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
                percpu_ref_switch_lock);

    if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
        __percpu_ref_switch_to_atomic(ref, confirm_switch);
    else
        __percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                 percpu_ref_func_t *confirm_switch)
{
    unsigned long flags;

    spin_lock_irqsave(&percpu_ref_switch_lock, flags);

    ref->force_atomic = true;
    __percpu_ref_switch_mode(ref, confirm_switch);

    spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
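
/*
 * Sketch of waiting for the atomic switch to complete, reusing the
 * hypothetical struct foo from above.  @confirm_switch may be invoked
 * from RCU callback context, so it only signals a completion:
 */
static void foo_ref_frozen(struct percpu_ref *ref)
{
    struct foo *foo = container_of(ref, struct foo, ref);

    complete(&foo->frozen);
}

static void foo_freeze(struct foo *foo)
{
    /* sticky: @ref stays atomic until percpu_ref_switch_to_percpu() */
    percpu_ref_switch_to_atomic(&foo->ref, foo_ref_frozen);
    wait_for_completion(&foo->frozen);
    /* all CPUs now see @ref in atomic mode */
}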

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
    unsigned long flags;

    spin_lock_irqsave(&percpu_ref_switch_lock, flags);

    ref->force_atomic = false;
    __percpu_ref_switch_mode(ref, NULL);

    spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
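
/*
 * Sketch of the matching unfreeze for foo_freeze() above: clearing the
 * sticky atomic state lets gets/puts go back to the fast percpu path,
 * either immediately or, if @ref is dying or dead, on the next
 * percpu_ref_reinit().
 */
static void foo_unfreeze(struct foo *foo)
{
    percpu_ref_switch_to_percpu(&foo->ref);
}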

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs, at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context,
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                 percpu_ref_func_t *confirm_kill)
{
    unsigned long flags;

    spin_lock_irqsave(&percpu_ref_switch_lock, flags);

    WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
          "%s called more than once on %pf!", __func__, ref->release);

    ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
    __percpu_ref_switch_mode(ref, confirm_kill);
    percpu_ref_put(ref);

    spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
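
/*
 * Sketch of a kill-and-drain shutdown, again with the hypothetical
 * struct foo: @confirm_kill fires once no CPU can percpu_ref_tryget_live()
 * the ref anymore, while foo_release() runs only when the count actually
 * reaches 0.
 */
static void foo_confirm_kill(struct percpu_ref *ref)
{
    struct foo *foo = container_of(ref, struct foo, ref);

    complete(&foo->killed);
}

static void foo_shutdown(struct foo *foo)
{
    /* drops the initial ref and marks @ref dead */
    percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
    wait_for_completion(&foo->killed);
    /* new tryget_live() calls now fail; existing users still hold refs */
}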

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
    unsigned long flags;

    spin_lock_irqsave(&percpu_ref_switch_lock, flags);

    WARN_ON_ONCE(!percpu_ref_is_zero(ref));

    ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
    percpu_ref_get(ref);
    __percpu_ref_switch_mode(ref, NULL);

    spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
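
/*
 * Sketch of a revival cycle, assuming a variant of struct foo whose
 * release callback does not free the object (unlike foo_release() above),
 * so the ref can be brought back between hitting 0 and percpu_ref_exit():
 */
static void foo_revive(struct foo *foo)
{
    /* only legal once the ref has actually reached 0 */
    WARN_ON(!percpu_ref_is_zero(&foo->ref));

    /*
     * Live again with a refcount of 1, back in percpu mode unless the
     * atomic state was made sticky via percpu_ref_switch_to_atomic().
     */
    percpu_ref_reinit(&foo->ref);
}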