Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Out-of-line refcount functions.
0004  */
0005 
0006 #include <linux/mutex.h>
0007 #include <linux/refcount.h>
0008 #include <linux/spinlock.h>
0009 #include <linux/bug.h>
0010 
0011 #define REFCOUNT_WARN(str)  WARN_ONCE(1, "refcount_t: " str ".\n")
0012 
0013 void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
0014 {
0015     refcount_set(r, REFCOUNT_SATURATED);
0016 
0017     switch (t) {
0018     case REFCOUNT_ADD_NOT_ZERO_OVF:
0019         REFCOUNT_WARN("saturated; leaking memory");
0020         break;
0021     case REFCOUNT_ADD_OVF:
0022         REFCOUNT_WARN("saturated; leaking memory");
0023         break;
0024     case REFCOUNT_ADD_UAF:
0025         REFCOUNT_WARN("addition on 0; use-after-free");
0026         break;
0027     case REFCOUNT_SUB_UAF:
0028         REFCOUNT_WARN("underflow; use-after-free");
0029         break;
0030     case REFCOUNT_DEC_LEAK:
0031         REFCOUNT_WARN("decrement hit 0; leaking memory");
0032         break;
0033     default:
0034         REFCOUNT_WARN("unknown saturation event!?");
0035     }
0036 }
0037 EXPORT_SYMBOL(refcount_warn_saturate);
0038 
0039 /**
0040  * refcount_dec_if_one - decrement a refcount if it is 1
0041  * @r: the refcount
0042  *
0043  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
0044  * success thereof.
0045  *
0046  * Like all decrement operations, it provides release memory order and provides
0047  * a control dependency.
0048  *
0049  * It can be used like a try-delete operator; this explicit case is provided
0050  * and not cmpxchg in generic, because that would allow implementing unsafe
0051  * operations.
0052  *
0053  * Return: true if the resulting refcount is 0, false otherwise
0054  */
0055 bool refcount_dec_if_one(refcount_t *r)
0056 {
0057     int val = 1;
0058 
0059     return atomic_try_cmpxchg_release(&r->refs, &val, 0);
0060 }
0061 EXPORT_SYMBOL(refcount_dec_if_one);
0062 
0063 /**
0064  * refcount_dec_not_one - decrement a refcount if it is not 1
0065  * @r: the refcount
0066  *
0067  * No atomic_t counterpart, it decrements unless the value is 1, in which case
0068  * it will return false.
0069  *
0070  * Was often done like: atomic_add_unless(&var, -1, 1)
0071  *
0072  * Return: true if the decrement operation was successful, false otherwise
0073  */
0074 bool refcount_dec_not_one(refcount_t *r)
0075 {
0076     unsigned int new, val = atomic_read(&r->refs);
0077 
0078     do {
0079         if (unlikely(val == REFCOUNT_SATURATED))
0080             return true;
0081 
0082         if (val == 1)
0083             return false;
0084 
0085         new = val - 1;
0086         if (new > val) {
0087             WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
0088             return true;
0089         }
0090 
0091     } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
0092 
0093     return true;
0094 }
0095 EXPORT_SYMBOL(refcount_dec_not_one);
0096 
0097 /**
0098  * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
0099  *                               refcount to 0
0100  * @r: the refcount
0101  * @lock: the mutex to be locked
0102  *
0103  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
0104  * to decrement when saturated at REFCOUNT_SATURATED.
0105  *
0106  * Provides release memory ordering, such that prior loads and stores are done
0107  * before, and provides a control dependency such that free() must come after.
0108  * See the comment on top.
0109  *
0110  * Return: true and hold mutex if able to decrement refcount to 0, false
0111  *         otherwise
0112  */
0113 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
0114 {
0115     if (refcount_dec_not_one(r))
0116         return false;
0117 
0118     mutex_lock(lock);
0119     if (!refcount_dec_and_test(r)) {
0120         mutex_unlock(lock);
0121         return false;
0122     }
0123 
0124     return true;
0125 }
0126 EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
0127 
0128 /**
0129  * refcount_dec_and_lock - return holding spinlock if able to decrement
0130  *                         refcount to 0
0131  * @r: the refcount
0132  * @lock: the spinlock to be locked
0133  *
0134  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
0135  * decrement when saturated at REFCOUNT_SATURATED.
0136  *
0137  * Provides release memory ordering, such that prior loads and stores are done
0138  * before, and provides a control dependency such that free() must come after.
0139  * See the comment on top.
0140  *
0141  * Return: true and hold spinlock if able to decrement refcount to 0, false
0142  *         otherwise
0143  */
0144 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
0145 {
0146     if (refcount_dec_not_one(r))
0147         return false;
0148 
0149     spin_lock(lock);
0150     if (!refcount_dec_and_test(r)) {
0151         spin_unlock(lock);
0152         return false;
0153     }
0154 
0155     return true;
0156 }
0157 EXPORT_SYMBOL(refcount_dec_and_lock);
0158 
0159 /**
0160  * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
0161  *                                 interrupts if able to decrement refcount to 0
0162  * @r: the refcount
0163  * @lock: the spinlock to be locked
0164  * @flags: saved IRQ-flags if the lock is acquired
0165  *
0166  * Same as refcount_dec_and_lock() above except that the spinlock is acquired
0167  * with disabled interrupts.
0168  *
0169  * Return: true and hold spinlock if able to decrement refcount to 0, false
0170  *         otherwise
0171  */
0172 bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
0173                    unsigned long *flags)
0174 {
0175     if (refcount_dec_not_one(r))
0176         return false;
0177 
0178     spin_lock_irqsave(lock, *flags);
0179     if (!refcount_dec_and_test(r)) {
0180         spin_unlock_irqrestore(lock, *flags);
0181         return false;
0182     }
0183 
0184     return true;
0185 }
0186 EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);