/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together, please audit them carefully to
 * ensure they all have forward progress. Many atomic operations may default to
 * cmpxchg() loops which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also heavily relies on mixed size atomic operations, in specific
 * it requires architectures to have xchg16; something which many LL/SC
 * architectures need to implement as a 32bit and+or in order to satisfy the
 * forward progress guarantees mentioned above.
 *
 * Further reading on mixed size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
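
/*
 * Illustration (not part of the original header; names are hypothetical):
 * on an LL/SC architecture without a native 16-bit xchg, a halfword
 * exchange on the lock word tends to get emulated with a word-sized
 * cmpxchg loop, roughly:
 *
 *	static inline u16 example_xchg16(u32 *word, u16 new, int shift)
 *	{
 *		u32 old = READ_ONCE(*word), prev;
 *		u32 mask = 0xffffU << shift;
 *
 *		for (;;) {
 *			prev = cmpxchg_relaxed(word, old,
 *					       (old & ~mask) | ((u32)new << shift));
 *			if (prev == old)
 *				return (u16)(old >> shift);
 *			// may retry indefinitely while other bits keep changing
 *			old = prev;
 *		}
 *	}
 *
 * The unbounded retry loop is exactly the forward-progress problem the
 * paragraph above warns about.
 */
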
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to avoid the lockref code stealing the
 *      lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
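
/*
 * Illustration of the lockref-style use mentioned above (a sketch, not part
 * of the original header; the struct and helper names are hypothetical).
 * The caller snapshots the whole { lock, count } pair and only attempts a
 * lockless count update when the snapshot shows a fully idle lock word,
 * i.e. neither held nor with waiters queued:
 *
 *	struct example_lockref {
 *		union {
 *			u64 lock_count;
 *			struct {
 *				struct qspinlock lock;
 *				int count;
 *			};
 *		};
 *	};
 *
 *	static inline bool example_lockref_get_fast(struct example_lockref *ref)
 *	{
 *		struct example_lockref old, new;
 *
 *		old.lock_count = READ_ONCE(ref->lock_count);
 *		while (queued_spin_value_unlocked(old.lock)) {
 *			new = old;
 *			new.count++;
 *			if (try_cmpxchg64_relaxed(&ref->lock_count,
 *						  &old.lock_count, new.lock_count))
 *				return true;	// count bumped without locking
 *		}
 *		return false;	// fall back to taking the lock
 *	}
 */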

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
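
/*
 * For reference (summarised from asm-generic/qspinlock_types.h): the lock
 * word packs all state into a single 32-bit value, roughly:
 *
 *	 0- 7: locked byte
 *	    8: pending
 *	 rest: tail index / tail cpu of the last queued waiter
 *
 * so masking off _Q_LOCKED_MASK leaves exactly the pending and tail bits,
 * and any non-zero remainder means at least one other CPU is waiting.
 */
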
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
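
/*
 * Example use (illustrative only; the callee names are hypothetical).
 * Because the lock word is read before the cmpxchg, a failed trylock stays
 * read-only on the lock cacheline, which keeps opportunistic callers cheap
 * under contention:
 *
 *	if (queued_spin_trylock(lock)) {
 *		do_work_locked();	// hypothetical
 *		queued_spin_unlock(lock);
 *	} else {
 *		defer_work();		// hypothetical
 *	}
 */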

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif
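
/*
 * Note that atomic_try_cmpxchg_acquire() updates @val with the value it
 * observed on failure, so the slowpath starts from that snapshot instead of
 * re-reading the lock word. Architectures wanting extra work in the fast
 * path can define their own queued_spin_lock() before including this
 * header; a hypothetical override keeping the same shape (sketch only):
 *
 *	#define queued_spin_lock queued_spin_lock
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		int val = 0;
 *
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val,
 *						      _Q_LOCKED_VAL)))
 *			return;
 *
 *		my_arch_lock_contended_hook(lock);	// hypothetical
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 */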

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
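
/*
 * Only the locked byte is cleared here; any pending/tail state set by other
 * CPUs is left for them to act on. An architecture needing something other
 * than a plain release store can override this before including the header,
 * e.g. (sketch only, heavier than necessary on most machines):
 *
 *	#define queued_spin_unlock queued_spin_unlock
 *	static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_mb();	// full barrier implies release ordering
 *		WRITE_ONCE(lock->locked, 0);
 *	}
 */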

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
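
/*
 * virt_spin_lock() exists so a guest kernel can opt out of queueing: with
 * vCPU preemption, strict FIFO queueing behind a preempted waiter is
 * counter-productive, so an architecture may override this to take the lock
 * with a simple (unfair) test-and-set spin and return true, bypassing the
 * qspinlock path entirely. A hypothetical override, loosely following the
 * shape used on x86 (sketch only):
 *
 *	#define virt_spin_lock virt_spin_lock
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!my_arch_is_virtualized())		// hypothetical
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */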

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
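
/*
 * An architecture typically opts in by making arch_spinlock_t the qspinlock
 * type (via asm-generic/qspinlock_types.h) and pulling this header in from
 * its own asm/qspinlock.h / asm/spinlock.h, roughly (hypothetical arch):
 *
 *	// arch/foo/include/asm/spinlock.h
 *	#include <asm/qspinlock.h>
 *	#include <asm/qrwlock.h>
 */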

#endif /* __ASM_GENERIC_QSPINLOCK_H */