#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

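/*
 * Simple spin lock operations. The lock word is 0 while the lock is
 * free and holds the owner's per-CPU lock value (cpu + 1, see
 * arch_spin_lockval()) while the lock is taken.
 */
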
void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

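/*
 * arch_spin_lock() and arch_spin_trylock() try a single
 * compare-and-swap via arch_spin_trylock_once() first and only fall
 * back to the out-of-line slow paths arch_spin_lock_wait() and
 * arch_spin_trylock_retry() if that fails.
 */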
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	kcsan_release();
	/* Store 0 into the halfword that holds the owner's lock value */
	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49)	/* NIAI 7 */
		"	sth	%1,%0\n"
		: "=R" (((unsigned short *) &lp->lock)[1])
		: "d" (0) : "cc", "memory");
}

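/*
 * Read-write lock operations. Readers are counted in the lower 16 bits
 * of rw->cnts; a writer claims a free lock by replacing 0 with 0x30000,
 * so any bit under the 0xffff0000 mask sends a reader to the slow path.
 */
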
#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

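/*
 * The reader fast path just increments the reader count; if the
 * previous value had any writer bits set, the reader has to wait in
 * arch_read_lock_wait() until the writer is done.
 */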
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

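/*
 * The writer fast path only succeeds if the lock word is completely
 * free (no readers, no other writer); otherwise the writer has to wait
 * in arch_write_lock_wait().
 */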
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}

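/*
 * The trylock variants make a single attempt and never wait: a read
 * trylock fails as soon as any writer bit is set, a write trylock
 * fails unless the lock word is completely free.
 */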
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) &&
		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif