0001
0002
0003
0004
0005
0006 #ifndef __ASM_SPINLOCK_H
0007 #define __ASM_SPINLOCK_H
0008
0009 #include <asm/spinlock_types.h>
0010 #include <asm/processor.h>
0011 #include <asm/barrier.h>
0012
/*
 * Lock is held iff slock differs from the unlocked value.
 * This is an instantaneous snapshot — racy by nature; for debug/assert use.
 */
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
0014
0015 #ifdef CONFIG_ARC_HAS_LLSC
0016
/*
 * Acquire the spinlock using an LLOCK/SCOND retry loop: spin while the
 * word reads LOCKED, then attempt to store LOCKED; SCOND fails (Z clear)
 * if another CPU intervened, in which case we retry from the load.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	__asm__ __volatile__(
	"1: llock %[val], [%[slock]] \n"
	" breq %[val], %[LOCKED], 1b \n"	/* spin while someone else holds it */
	" scond %[LOCKED], [%[slock]] \n"	/* try to take it */
	" bnz 1b \n"				/* scond lost the race: retry */
	" \n"
	: [val] "=&r" (val)
	: [slock] "r" (&(lock->slock)),
	[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/*
	 * Full barrier after acquire so critical-section accesses cannot
	 * be reordered before the lock is taken (ACQUIRE semantics).
	 */
	smp_mb();
}
0042
0043
/*
 * Single-shot lock attempt: returns 1 if the lock was acquired, 0 if it
 * was already held. Unlike arch_spin_lock() this bails out (to label 4)
 * as soon as the word reads LOCKED instead of spinning; only an SCOND
 * failure (lost race on a free lock) causes a retry.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1: llock %[val], [%[slock]] \n"
	" breq %[val], %[LOCKED], 4f \n"	/* already held: give up */
	" scond %[LOCKED], [%[slock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" mov %[got_it], 1 \n"
	"4: \n"
	" \n"
	: [val] "=&r" (val),
	[got_it] "+&r" (got_it)
	: [slock] "r" (&(lock->slock)),
	[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	/* ACQUIRE ordering on the success path (harmless on failure) */
	smp_mb();

	return got_it;
}
0066
/*
 * Release the lock with a plain store; the barrier BEFORE the store gives
 * RELEASE semantics (critical-section accesses complete before the lock
 * becomes visible as free). WRITE_ONCE prevents store tearing/reordering
 * by the compiler.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
0073
0074
0075
0076
0077
0078
/*
 * Reader lock. The rwlock counter scheme (as used by this file): the
 * counter starts at __ARCH_RW_LOCK_UNLOCKED__, each reader decrements it
 * by one, and a writer drops it to 0 (WR_LOCKED). So a counter <= 0 means
 * a writer holds the lock: BRLS (unsigned lower-or-same vs 0) spins in
 * that case; otherwise we decrement to register one more reader.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brls %[val], %[WR_LOCKED], 1b\n"	/* spin while write-locked */
	" sub %[val], %[val], 1 \n"		/* take one reader slot */
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	[WR_LOCKED] "ir" (0)
	: "memory", "cc");

	/* ACQUIRE barrier: keep the read-side critical section after the lock */
	smp_mb();
}
0107
0108
/*
 * One-shot reader lock attempt: returns 1 on success, 0 if a writer holds
 * the lock. Bails out (label 4) instead of spinning when the counter is
 * <= 0 (write-locked); retries only on SCOND failure.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brls %[val], %[WR_LOCKED], 4f\n"	/* write-locked: give up */
	" sub %[val], %[val], 1 \n"		/* take one reader slot */
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" mov %[got_it], 1 \n"
	" \n"
	"4: ; --- done --- \n"

	: [val] "=&r" (val),
	[got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	[WR_LOCKED] "ir" (0)
	: "memory", "cc");

	/* ACQUIRE ordering on the success path */
	smp_mb();

	return got_it;
}
0133
/*
 * Writer lock: spin until the counter equals the pristine UNLOCKED value
 * (no readers, no writer), then atomically drop it to WR_LOCKED (0) to
 * claim exclusive ownership.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brne %[val], %[UNLOCKED], 1b \n"	/* spin while readers/writer present */
	" mov %[val], %[WR_LOCKED] \n"
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter)),
	[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	[WR_LOCKED] "ir" (0)
	: "memory", "cc");

	/* ACQUIRE barrier: keep the critical section after the lock */
	smp_mb();
}
0165
0166
/*
 * One-shot writer lock attempt: returns 1 on success, 0 if any reader or
 * writer is present (counter != UNLOCKED). Bails out (label 4) instead of
 * spinning; retries only on SCOND failure.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" brne %[val], %[UNLOCKED], 4f \n"	/* contended: give up */
	" mov %[val], %[WR_LOCKED] \n"		/* claim exclusive (counter = 0) */
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" mov %[got_it], 1 \n"
	" \n"
	"4: ; --- done --- \n"

	: [val] "=&r" (val),
	[got_it] "+&r" (got_it)
	: [rwlock] "r" (&(rw->counter)),
	[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
	[WR_LOCKED] "ir" (0)
	: "memory", "cc");

	/* ACQUIRE ordering on the success path */
	smp_mb();

	return got_it;
}
0192
/*
 * Reader unlock: atomically increment the counter, returning the reader
 * slot taken in arch_read_lock(). Needs LLOCK/SCOND (not a plain store)
 * because other readers may be updating the counter concurrently.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* RELEASE barrier: critical-section accesses complete before unlock */
	smp_mb();

	__asm__ __volatile__(
	"1: llock %[val], [%[rwlock]] \n"
	" add %[val], %[val], 1 \n"
	" scond %[val], [%[rwlock]] \n"
	" bnz 1b \n"				/* scond failed: retry */
	" \n"
	: [val] "=&r" (val)
	: [rwlock] "r" (&(rw->counter))
	: "memory", "cc");
}
0212
/*
 * Writer unlock: the writer owns the counter exclusively, so a plain
 * WRITE_ONCE back to the pristine UNLOCKED value suffices. The barrier
 * before the store provides RELEASE semantics.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
0219
0220 #else
0221
/*
 * Non-LLSC fallback: acquire the lock with the atomic EX (exchange)
 * instruction — swap LOCKED into the lock word and spin until the value
 * that comes back is not LOCKED (i.e. we observed it free and took it
 * in the same atomic op).
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * NOTE(review): a barrier BEFORE acquire is not required for ACQUIRE
	 * semantics; presumably a hardware workaround for the EX-based
	 * implementation — confirm against arch history before removing.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1: ex %0, [%1] \n"
	" breq %0, %2, 1b \n"		/* got LOCKED back: still held, retry */
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/* ACQUIRE barrier: keep the critical section after the lock */
	smp_mb();
}
0242
0243
/*
 * Non-LLSC one-shot lock attempt: a single EX swaps LOCKED into the lock
 * word; we succeeded iff the previous value that came back was UNLOCKED.
 * Returns 1 on success, 0 otherwise. Note: on failure the EX rewrote
 * LOCKED over an already-LOCKED word, which is harmless.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1: ex %0, [%1] \n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
0260
/*
 * Non-LLSC unlock: swap UNLOCKED back into the lock word.
 *
 * NOTE(review): an atomic EX is used here where a plain store would
 * normally release the lock — presumably required by the same hardware
 * behavior that motivates the extra barriers in this !LLSC path; confirm
 * before simplifying.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/* RELEASE barrier: critical-section accesses complete before unlock */
	smp_mb();

	__asm__ __volatile__(
	" ex %0, [%1] \n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();
}
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298 static inline int arch_read_trylock(arch_rwlock_t *rw)
0299 {
0300 int ret = 0;
0301 unsigned long flags;
0302
0303 local_irq_save(flags);
0304 arch_spin_lock(&(rw->lock_mutex));
0305
0306
0307
0308
0309
0310 if (rw->counter > 0) {
0311 rw->counter--;
0312 ret = 1;
0313 }
0314
0315 arch_spin_unlock(&(rw->lock_mutex));
0316 local_irq_restore(flags);
0317
0318 return ret;
0319 }
0320
0321
0322 static inline int arch_write_trylock(arch_rwlock_t *rw)
0323 {
0324 int ret = 0;
0325 unsigned long flags;
0326
0327 local_irq_save(flags);
0328 arch_spin_lock(&(rw->lock_mutex));
0329
0330
0331
0332
0333
0334
0335
0336 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
0337 rw->counter = 0;
0338 ret = 1;
0339 }
0340 arch_spin_unlock(&(rw->lock_mutex));
0341 local_irq_restore(flags);
0342
0343 return ret;
0344 }
0345
0346 static inline void arch_read_lock(arch_rwlock_t *rw)
0347 {
0348 while (!arch_read_trylock(rw))
0349 cpu_relax();
0350 }
0351
0352 static inline void arch_write_lock(arch_rwlock_t *rw)
0353 {
0354 while (!arch_write_trylock(rw))
0355 cpu_relax();
0356 }
0357
0358 static inline void arch_read_unlock(arch_rwlock_t *rw)
0359 {
0360 unsigned long flags;
0361
0362 local_irq_save(flags);
0363 arch_spin_lock(&(rw->lock_mutex));
0364 rw->counter++;
0365 arch_spin_unlock(&(rw->lock_mutex));
0366 local_irq_restore(flags);
0367 }
0368
0369 static inline void arch_write_unlock(arch_rwlock_t *rw)
0370 {
0371 unsigned long flags;
0372
0373 local_irq_save(flags);
0374 arch_spin_lock(&(rw->lock_mutex));
0375 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
0376 arch_spin_unlock(&(rw->lock_mutex));
0377 local_irq_restore(flags);
0378 }
0379
0380 #endif
0381
0382 #endif