/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

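/*
 * Bound on the number of spins waiting for a pending->locked handover
 * in the generic slowpath before queueing; overrides the asm-generic
 * default of 1.
 */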
#define _Q_PENDING_LOOPS (1 << 9)

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

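/*
 * On a shared-processor LPAR the hypervisor may preempt lock holders,
 * so take the paravirt slowpath, which can yield the vCPU instead of
 * spinning; dedicated-processor systems use the native slowpath.
 */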
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        if (!is_shared_processor())
                native_queued_spin_lock_slowpath(lock, val);
        else
                __pv_queued_spin_lock_slowpath(lock, val);
}

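/*
 * Native unlock is a plain store-release of the locked byte; the
 * paravirt unlock must also kick any waiter parked in pv_wait().
 */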
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        if (!is_shared_processor())
                smp_store_release(&lock->locked, 0);
        else
                __pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif

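/*
 * Fast path: try to take the lock by moving the lock word from 0
 * (unlocked) to _Q_LOCKED_VAL. On failure the cmpxchg leaves the
 * observed lock word in 'val', which is handed to the slowpath.
 */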
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        u32 val = 0;

        if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
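/*
 * How long a waiter in the paravirt slowpath spins before calling
 * pv_wait() to yield; the value is not specifically tuned for powerpc.
 */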
#define SPIN_THRESHOLD (1 << 15)

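/*
 * pv_wait() and pv_kick() are the hooks the generic paravirt qspinlock
 * code uses to park and wake waiters; here they map onto the hypervisor
 * yield/prod interfaces provided by asm/paravirt.h.
 */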
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        if (*ptr != val)
                return;
        yield_to_any();
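        /*
         * yield_to_any() gives the vCPU up to any other; a possible
         * refinement would be to yield to the specific CPU ahead of
         * us when waiting in the queue.
         */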
}

static __always_inline void pv_kick(int cpu)
{
        prod_cpu(cpu);
}

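/*
 * Allocate the global hash table the paravirt code uses to find, at
 * unlock time, the queue-head waiter that needs to be kicked.
 */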
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
        __pv_init_lock_hash();
}

#endif

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */