/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

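/*
 * Bound on how long the generic slowpath spins waiting for a pending->locked
 * hand-over before re-evaluating; x86 uses (1 << 9) iterations (see the
 * pending-bit handling in kernel/locking/qspinlock.c).
 */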
#define _Q_PENDING_LOOPS    (1 << 9)

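/*
 * Atomically set the pending bit with a locked BTS and return (an
 * approximation of) the old lock word: the prior pending state combined
 * with the currently observed tail and locked fields. On x86 the LOCK'ed
 * instruction already provides the required acquire ordering.
 */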
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
    u32 val;

    /*
     * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
     * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
     * statement expression, which GCC doesn't like.
     */
    val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
                   "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
    val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

    return val;
}

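/*
 * Illustrative sketch (not part of this header): the pending-bit fast path of
 * the generic slowpath in kernel/locking/qspinlock.c uses the helper above
 * roughly like this, queueing on contention and otherwise waiting for the
 * current owner before taking over:
 *
 *     val = queued_fetch_set_pending_acquire(lock);
 *     if (val & ~_Q_LOCKED_MASK)
 *         goto queue;
 *     if (val & _Q_LOCKED_MASK)
 *         smp_cond_load_acquire(&lock->locked, !VAL);
 *     clear_pending_set_locked(lock);
 */
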
#ifdef CONFIG_PARAVIRT_SPINLOCKS
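/*
 * PV slowpath plumbing: the native MCS slowpath and the PV-aware variant
 * (plus its callee-save unlock thunk) are built from
 * kernel/locking/qspinlock.c; __pv_init_lock_hash() sets up the hash table
 * used to find a lock's waiting node so its vCPU can be kicked at unlock
 * time. nopvspin corresponds to the "nopvspin" command-line option, which
 * makes a guest stick with the native slowpath.
 */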
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * An smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
    smp_store_release(&lock->locked, 0);
}

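/*
 * The wrappers below go through the paravirt ops (pv_ops.lock.*), which are
 * patched at boot to either the native implementations above or the PV-aware
 * ones. kcsan_release() tells KCSAN that a release happens here, since the
 * actual releasing store is hidden behind the PV call.
 */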
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
    pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
    kcsan_release();
    pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
    return pv_vcpu_is_preempted(cpu);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

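/*
 * Illustrative note: in this version of the code, native_pv_lock_init()
 * (arch/x86/kernel/paravirt.c) simply switches the key off on bare metal,
 * roughly:
 *
 *     if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 *         static_branch_disable(&virt_spin_lock_key);
 */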
void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
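/*
 * Illustrative sketch (not part of this header): the generic slowpath in
 * kernel/locking/qspinlock.c gives virt_spin_lock() the first shot, roughly:
 *
 *     void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *     {
 *         if (virt_spin_lock(lock))
 *             return;
 *         ...
 *     }
 */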
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
    if (!static_branch_likely(&virt_spin_lock_key))
        return false;

    /*
     * On hypervisors without PARAVIRT_SPINLOCKS support we fall
     * back to a Test-and-Set spinlock, because fair locks have
     * horrible lock 'holder' preemption issues.
     */

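    /*
     * Spin with plain reads and only attempt the atomic cmpxchg once the
     * lock word reads as free (test-and-test-and-set), to limit cacheline
     * bouncing.
     */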
    do {
        while (atomic_read(&lock->val) != 0)
            cpu_relax();
    } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

    return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

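/*
 * The generic header supplies queued_spin_lock(), queued_spin_trylock(),
 * queued_spin_unlock() and the arch_spin_*() mappings, honouring the
 * overrides #defined above.
 */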
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */