Back to home page

LXR

 
 

    


0001 #include <linux/export.h>
0002 #include <linux/lockref.h>
0003 
#if USE_CMPXCHG_LOCKREF

/*
 * Lock-free update loop for the lockref fast path.
 *
 * CODE updates "new" from the snapshot in "old" (and may "return" or
 * "break" out of the surrounding loop); SUCCESS runs once the 64-bit
 * cmpxchg of the combined lock+count word has committed the update.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * The retry limit bounds the time spent spinning on a heavily
 * contended cacheline: once it is exhausted we break out and let the
 * caller fall back to the spinlocked slow path instead of livelocking
 * against other CPUs that keep winning the cmpxchg.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: fast path is a no-op, callers take the lock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
0032 
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Lock-free fast path: bump count while the spinlock is unlocked. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: no cmpxchg support or the lock is held — take it. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
0053 
0054 /**
0055  * lockref_get_not_zero - Increments count unless the count is 0 or dead
0056  * @lockref: pointer to lockref structure
0057  * Return: 1 if count updated successfully or 0 if count was zero
0058  */
0059 int lockref_get_not_zero(struct lockref *lockref)
0060 {
0061     int retval;
0062 
0063     CMPXCHG_LOOP(
0064         new.count++;
0065         if (old.count <= 0)
0066             return 0;
0067     ,
0068         return 1;
0069     );
0070 
0071     spin_lock(&lockref->lock);
0072     retval = 0;
0073     if (lockref->count > 0) {
0074         lockref->count++;
0075         retval = 1;
0076     }
0077     spin_unlock(&lockref->lock);
0078     return retval;
0079 }
0080 EXPORT_SYMBOL(lockref_get_not_zero);
0081 
0082 /**
0083  * lockref_get_or_lock - Increments count unless the count is 0 or dead
0084  * @lockref: pointer to lockref structure
0085  * Return: 1 if count updated successfully or 0 if count was zero
0086  * and we got the lock instead.
0087  */
0088 int lockref_get_or_lock(struct lockref *lockref)
0089 {
0090     CMPXCHG_LOOP(
0091         new.count++;
0092         if (old.count <= 0)
0093             break;
0094     ,
0095         return 1;
0096     );
0097 
0098     spin_lock(&lockref->lock);
0099     if (lockref->count <= 0)
0100         return 0;
0101     lockref->count++;
0102     spin_unlock(&lockref->lock);
0103     return 1;
0104 }
0105 EXPORT_SYMBOL(lockref_get_or_lock);
0106 
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 *
 * Note: this only succeeds via the lock-free fast path; when
 * CMPXCHG_LOOP is the no-op (!USE_CMPXCHG_LOCKREF) or the loop
 * cannot make progress, it returns -1 without touching the count.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
0126 
0127 /**
0128  * lockref_put_or_lock - decrements count unless count <= 1 before decrement
0129  * @lockref: pointer to lockref structure
0130  * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
0131  */
0132 int lockref_put_or_lock(struct lockref *lockref)
0133 {
0134     CMPXCHG_LOOP(
0135         new.count--;
0136         if (old.count <= 1)
0137             break;
0138     ,
0139         return 1;
0140     );
0141 
0142     spin_lock(&lockref->lock);
0143     if (lockref->count <= 1)
0144         return 0;
0145     lockref->count--;
0146     spin_unlock(&lockref->lock);
0147     return 1;
0148 }
0149 EXPORT_SYMBOL(lockref_put_or_lock);
0150 
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold the spinlock. Sets the count to a large negative
 * sentinel so the "count < 0" / "count <= 0" checks in the other
 * lockref operations treat the object as dead.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
0161 
0162 /**
0163  * lockref_get_not_dead - Increments count unless the ref is dead
0164  * @lockref: pointer to lockref structure
0165  * Return: 1 if count updated successfully or 0 if lockref was dead
0166  */
0167 int lockref_get_not_dead(struct lockref *lockref)
0168 {
0169     int retval;
0170 
0171     CMPXCHG_LOOP(
0172         new.count++;
0173         if (old.count < 0)
0174             return 0;
0175     ,
0176         return 1;
0177     );
0178 
0179     spin_lock(&lockref->lock);
0180     retval = 0;
0181     if (lockref->count >= 0) {
0182         lockref->count++;
0183         retval = 1;
0184     }
0185     spin_unlock(&lockref->lock);
0186     return retval;
0187 }
0188 EXPORT_SYMBOL(lockref_get_not_dead);