// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                    \
    int retry = 100;                                                        \
    struct lockref old;                                                     \
    BUILD_BUG_ON(sizeof(old) != 8);                                         \
    old.lock_count = READ_ONCE(lockref->lock_count);                        \
    while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
        struct lockref new = old;                                           \
        CODE                                                                \
        if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,              \
                                         &old.lock_count,                   \
                                         new.lock_count))) {                \
            SUCCESS;                                                        \
        }                                                                   \
        if (!--retry)                                                       \
            break;                                                          \
        cpu_relax();                                                        \
    }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
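
/*
 * Why a lockless fast path is possible at all: struct lockref (see
 * <linux/lockref.h>) packs the spinlock and the reference count into a
 * single 64-bit word, roughly:
 *
 *     struct lockref {
 *         union {
 *             __aligned_u64 lock_count;
 *             struct {
 *                 spinlock_t lock;
 *                 int count;
 *             };
 *         };
 *     };
 *
 * hence the BUILD_BUG_ON(sizeof(old) != 8) above. While the embedded lock
 * is observed unlocked, the count can be updated with a single 64-bit
 * cmpxchg on the combined word; if the lock is taken or the count changes
 * underneath us, the compare fails and "old" is reloaded. After at most
 * 100 retries, or as soon as the lock is seen held, the callers below
 * stop trying and fall back to taking the spinlock (or, in the case of
 * lockref_put_return(), simply report failure).
 */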

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
    CMPXCHG_LOOP(
        new.count++;
    ,
        return;
    );

    spin_lock(&lockref->lock);
    lockref->count++;
    spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
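
/*
 * Illustrative caller (hypothetical names, shown only to make the
 * contract concrete): an object that embeds a lockref can take an extra
 * reference on a pointer it already owns without touching the spinlock
 * on the common path; lockref_get() is only valid when the caller already
 * holds a reference, so the count is known to be non-zero:
 *
 *     struct cache_entry {
 *         struct lockref ref;
 *     };
 *
 *     static void cache_entry_grab(struct cache_entry *e)
 *     {
 *         lockref_get(&e->ref);
 *     }
 *
 * The dcache uses this pattern for the lockref embedded in struct dentry.
 */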

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
    int retval;

    CMPXCHG_LOOP(
        new.count++;
        if (old.count <= 0)
            return 0;
    ,
        return 1;
    );

    spin_lock(&lockref->lock);
    retval = 0;
    if (lockref->count > 0) {
        lockref->count++;
        retval = 1;
    }
    spin_unlock(&lockref->lock);
    return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
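
/*
 * Note the asymmetry with lockref_get_not_dead() below: the test here is
 * old.count <= 0, so a lockref that has been marked dead (count == -128)
 * is rejected just like one that has merely dropped to zero, whereas
 * lockref_get_not_dead() only rejects negative counts and will happily
 * take a reference while the count is still zero.
 */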

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
    int retval;

    CMPXCHG_LOOP(
        new.count--;
        if (old.count <= 1)
            return 0;
    ,
        return 1;
    );

    spin_lock(&lockref->lock);
    retval = 0;
    if (lockref->count > 1) {
        lockref->count--;
        retval = 1;
    }
    spin_unlock(&lockref->lock);
    return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
    CMPXCHG_LOOP(
        new.count--;
        if (old.count <= 0)
            return -1;
    ,
        return new.count;
    );
    return -1;
}
EXPORT_SYMBOL(lockref_put_return);
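
/*
 * A negative return from lockref_put_return() does not necessarily mean
 * the lockref is dead: -1 is also returned when the spinlock is currently
 * held by someone else, and unconditionally on configurations without
 * USE_CMPXCHG_LOCKREF, where CMPXCHG_LOOP() expands to nothing and the
 * function falls straight through to the final return. Callers must
 * therefore treat -1 as "retry under the lock", not as a statement about
 * the reference count itself.
 */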

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
    CMPXCHG_LOOP(
        new.count--;
        if (old.count <= 1)
            break;
    ,
        return 1;
    );

    spin_lock(&lockref->lock);
    if (lockref->count <= 1)
        return 0;
    lockref->count--;
    spin_unlock(&lockref->lock);
    return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
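
/*
 * Sketch of the intended "put or lock" usage (hypothetical names): a
 * return of 1 means the reference was dropped and the caller is done; a
 * return of 0 means nothing was decremented and the caller now holds the
 * spinlock with the count known to be <= 1, which is exactly the state
 * needed to decide about last-reference teardown:
 *
 *     if (lockref_put_or_lock(&e->ref))
 *         return;
 *
 *     handle the final reference here, then spin_unlock(&e->ref.lock)
 */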

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
    assert_spin_locked(&lockref->lock);
    lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
    int retval;

    CMPXCHG_LOOP(
        new.count++;
        if (old.count < 0)
            return 0;
    ,
        return 1;
    );

    spin_lock(&lockref->lock);
    retval = 0;
    if (lockref->count >= 0) {
        lockref->count++;
        retval = 1;
    }
    spin_unlock(&lockref->lock);
    return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
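
/*
 * Putting lockref_mark_dead() and lockref_get_not_dead() together, a
 * teardown/lookup race typically looks roughly like this (hypothetical
 * names; the lookup side is assumed to find the object through some
 * RCU-protected structure, so the memory stays valid while it is probed):
 *
 *     teardown, dropping what the owner believes is the last reference:
 *         spin_lock(&e->ref.lock);
 *         if (--e->ref.count > 0) {
 *             spin_unlock(&e->ref.lock);
 *             return;
 *         }
 *         lockref_mark_dead(&e->ref);
 *         unhash_entry(e);
 *         spin_unlock(&e->ref.lock);
 *         ... free after an RCU grace period ...
 *
 *     lookup:
 *         rcu_read_lock();
 *         e = hash_lookup(key);
 *         if (e && !lockref_get_not_dead(&e->ref))
 *             e = NULL;
 *         rcu_read_unlock();
 *
 * A lookup racing with the teardown either raises the count before the
 * owner gets the lock, in which case the owner sees the extra reference
 * and keeps the object alive, or it observes the negative count written
 * by lockref_mark_dead() and backs off.
 */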