/* SPDX-License-Identifier: GPL-2.0 */

/*
 * 'Generic' ticket-lock implementation.
 *
 * It relies on atomic_fetch_add() having well defined forward progress
 * guarantees under contention. If your architecture cannot provide this,
 * stick to a test-and-set lock.
 *
 * It also relies on atomic_fetch_add() being safe vs smp_store_release() on
 * a sub-word of the value. This is generally true for anything LL/SC based;
 * if your architecture cannot do this you might be better off with a
 * test-and-set lock as well.
 *
 * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and
 * hence uses atomic_fetch_add(), which is RCsc, to create an RCsc hot path,
 * along with a full fence after the spin to upgrade the otherwise-RCpc
 * atomic_cond_read_acquire().
 *
 * The slow path spins with atomic_cond_read_acquire(), so architectures
 * with WFE-like instructions that sleep instead of polling for word
 * modifications should be sure to implement that (see arm64 for example).
 */
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H

#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>

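/*
 * Lock word layout: the high 16 bits hold the next ticket to hand out,
 * the low 16 bits hold the ticket currently being served.
 *
 * Take a ticket by bumping the high half; if it already matches the
 * now-serving half we own the lock, otherwise spin until it does.
 */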
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = atomic_fetch_add(1<<16, lock);
	u16 ticket = val >> 16;

	if (ticket == (u16)val)
		return;

	/*
	 * Not the owner yet: spin until the now-serving half of the lock
	 * word reaches our ticket.  atomic_cond_read_acquire() is only
	 * RCpc, so emit a full fence afterwards to restore the RCsc
	 * ordering the fast path above gets from atomic_fetch_add().
	 */
	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
	smp_mb();
}

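/*
 * Only succeed if the lock is currently unlocked (both halves equal) and
 * we win the race to bump the next-ticket half.  The fully ordered
 * cmpxchg provides the acquire semantics on success.
 */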
static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 old = atomic_read(lock);

	if ((old >> 16) != (old & 0xffff))
		return false;

	return atomic_try_cmpxchg(lock, &old, old + (1<<16));
}

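/*
 * Release the lock by bumping the now-serving half with release
 * semantics.  The pointer arithmetic selects the low-order 16 bits of
 * the lock word on both little- and big-endian machines.
 */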
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	u32 val = atomic_read(lock);

	smp_store_release(ptr, (u16)val + 1);
}

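/* The lock is held whenever the two halves of the lock word differ. */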
static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 val = atomic_read(lock);

	return ((val >> 16) != (val & 0xffff));
}

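/* Contended: at least one CPU is queued behind the current owner. */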
static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 val = atomic_read(lock);

	return (s16)((val >> 16) - (val & 0xffff)) > 1;
}

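/* Check a by-value copy of the lock word without touching the lock itself. */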
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !arch_spin_is_locked(&lock);
}

#include <asm/qrwlock.h>

#endif /* __ASM_GENERIC_SPINLOCK_H */