/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

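/*
 * Bound on the pending-bit spin in the generic slowpath: a CPU that sees
 * the lock word in the transient "pending, not yet locked" state re-reads
 * it up to this many times before falling back to the MCS queue.  The
 * generic default is a single re-read; x86 allows a longer spin here.
 */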
#define _Q_PENDING_LOOPS	(1 << 9)

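/*
 * Set the pending bit with a single "lock btsl" and return a value that
 * reflects the old pending bit together with the current locked/tail
 * fields, mirroring what the generic fetch_or()-based implementation
 * returns.  The locked RMW provides the required acquire ordering.
 */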
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

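/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slowpath and unlock operations go
 * through the paravirt ops, which are patched at boot to either the
 * native implementations above or the __pv_queued_* variants when
 * running under a hypervisor that supports PV spinlocks.
 */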
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	kcsan_release();
	pv_queued_spin_unlock(lock);
}

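/*
 * vcpu_is_preempted() lets generic spin-wait code (e.g. the owner-spin
 * heuristics) avoid busy-waiting on a CPU whose vCPU the hypervisor has
 * preempted; the PV op reports that state.
 */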
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

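/*
 * native_pv_lock_init() (arch/x86/kernel/paravirt.c) statically disables
 * virt_spin_lock_key on bare metal, so the test-and-set fallback below is
 * only ever taken when running under a hypervisor.
 */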
void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif

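/*
 * Pull in the generic queued spinlock API; the #defines above override
 * the pieces of it that x86 implements natively.
 */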
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */