/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

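/*
 * Simple spin lock operations.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */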
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
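/*
 * A held lock contains a token identifying the owner: 0x8000 in the
 * upper half-word and the owning CPU's paca_index in the lower half.
 */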
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

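/*
 * Returns the old value of the lock, so we succeeded in getting the
 * lock if the return value is 0.
 */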
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

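/*
 * On a shared-processor system (where a physical CPU is multiplexed
 * between several virtual processors) there is no point spinning on a
 * lock whose holder is not currently running on a physical CPU.
 * Instead we detect that case and ask the hypervisor to donate the
 * rest of our timeslice to the lock holder.
 *
 * The holder can be identified because a held lock contains the token
 * described above (0x8000 plus the holder's CPU number), available as
 * a single word in the paca.
 */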
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

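/*
 * Spin with reduced SMT thread priority (HMT_low) while the lock is
 * held, yielding to the hypervisor on shared-processor LPARs, then
 * restore normal priority and retry the atomic acquisition.
 */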
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

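/* Release barrier, then a plain store clears the lock word. */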
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

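/*
 * Read-write spinlocks: multiple readers may hold the lock, but only
 * one writer.  The lock word counts readers (> 0) when read-locked and
 * holds a negative token while write-locked.
 */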
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

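/*
 * Returns the old value of the lock + 1, so we got the read lock if
 * the return value is > 0.
 */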
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,%[eh]\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "xer", "memory");

	return tmp;
}

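/*
 * Returns the old value of the lock, so we got the write lock if the
 * return value is 0.
 */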
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

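/*
 * Atomically decrement the reader count; the release barrier orders
 * the critical section before the decrement.
 */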
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */