/* SPDX-License-Identifier: GPL-2.0 */

/*
 * 'Generic' ticket-lock implementation.
 *
 * It relies on atomic_fetch_add() having well defined forward progress
 * guarantees under contention. If your architecture cannot provide this, stick
 * to a test-and-set lock.
 *
 * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
 * sub-word of the value. This is generally true for anything LL/SC although
 * you'd be hard pressed to find anything useful in architecture specifications
 * about this. If your architecture cannot do this you might be better off with
 * a test-and-set.
 *
 * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
 * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
 * a full fence after the spin to upgrade the otherwise-RCpc
 * atomic_cond_read_acquire().
 *
 * The implementation uses smp_cond_load_acquire() to spin, so if the
 * architecture has WFE-like instructions to sleep instead of polling for word
 * modifications, be sure to implement that (see ARM64 for example).
 */

#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H

#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>

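/*
 * Lock word layout, as implied by the operations below (the names "next"
 * and "owner" are descriptive labels used here for illustration, not
 * fields of arch_spinlock_t):
 *
 *   bits 31..16  next  - the ticket handed to the next locker, advanced
 *                        by arch_spin_lock()/arch_spin_trylock()
 *   bits 15..0   owner - the ticket currently being served, advanced by
 *                        arch_spin_unlock()
 *
 * The lock is free exactly when next == owner.
 */
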
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
    u32 val = atomic_fetch_add(1<<16, lock);
    u16 ticket = val >> 16;

    /* Our ticket matches the current owner: the lock was free, it is now ours. */
    if (ticket == (u16)val)
        return;

    /*
     * atomic_cond_read_acquire() is RCpc, but rather than defining a
     * custom cond_read_rcsc() here we just emit a full fence.  We only
     * need the prior reads before subsequent writes ordering from
     * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
     * have no outstanding writes due to the atomic_fetch_add() the extra
     * orderings are free.
     */
    atomic_cond_read_acquire(lock, ticket == (u16)VAL);
    smp_mb();
}
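
/*
 * An illustrative two-CPU sequence, assuming the lock starts out zeroed
 * (next == owner == 0):
 *
 *   CPU0  arch_spin_lock()    fetch_add returns 0x00000000: ticket 0,
 *                             owner 0, fast path taken, CPU0 holds the lock
 *   CPU1  arch_spin_lock()    fetch_add returns 0x00010000: ticket 1,
 *                             owner still 0, spins in
 *                             atomic_cond_read_acquire()
 *   CPU0  arch_spin_unlock()  store-releases owner = 1
 *   CPU1                      observes owner == 1, runs smp_mb(), and now
 *                             holds the lock
 */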

static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
{
    u32 old = atomic_read(lock);

    if ((old >> 16) != (old & 0xffff))
        return false;

    return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
}
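
/*
 * Note that arch_spin_trylock() never queues: it only attempts the
 * cmpxchg when its snapshot shows next == owner, and the cmpxchg fails
 * if another CPU advanced the word in the meantime, so it cannot end up
 * holding a ticket it would have to wait on. A caller-side sketch
 * (illustrative only; real code uses the spin_trylock()/spin_lock()
 * wrappers rather than these arch_ helpers directly):
 *
 *   if (arch_spin_trylock(&lock)) {
 *           do_work();
 *           arch_spin_unlock(&lock);
 *   } else {
 *           arch_spin_lock(&lock);      // blocking fallback
 *           do_work();
 *           arch_spin_unlock(&lock);
 *   }
 *
 * where do_work() stands in for whatever the critical section does.
 */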

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    /*
     * Release by bumping only the owner half of the word. The owner
     * lives in the low 16 bits, which is the first u16 on little-endian
     * and the second u16 on big-endian, hence the IS_ENABLED() offset.
     */
    u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
    u32 val = atomic_read(lock);

    smp_store_release(ptr, (u16)val + 1);
}

static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
    u32 val = atomic_read(lock);

    return ((val >> 16) != (val & 0xffff));
}

static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
    u32 val = atomic_read(lock);

    return (s16)((val >> 16) - (val & 0xffff)) > 1;
}
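
/*
 * Worked examples for the checks above: with next == 4 and owner == 3 the
 * lock is held but uncontended (difference 1), while next == 5 and
 * owner == 3 means at least one other CPU is queued behind the holder
 * (difference 2 > 1), i.e. contended. The s16 cast keeps the comparison
 * correct across the 16-bit ticket wrap-around.
 */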

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
    return !arch_spin_is_locked(&lock);
}

#include <asm/qrwlock.h>

#endif /* __ASM_GENERIC_SPINLOCK_H */