/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned int val;

    __asm__ __volatile__(
    "1: llock   %[val], [%[slock]]  \n"
    "   breq    %[val], %[LOCKED], 1b   \n" /* spin while LOCKED */
    "   scond   %[LOCKED], [%[slock]]   \n" /* acquire */
    "   bnz 1b          \n"
    "                   \n"
    : [val]     "=&r"   (val)
    : [slock]   "r" (&(lock->slock)),
      [LOCKED]  "r" (__ARCH_SPIN_LOCK_LOCKED__)
    : "memory", "cc");

    /*
     * ACQUIRE barrier to ensure load/store after taking the lock
     * don't "bleed-up" out of the critical section (leak-in is allowed)
     * http://www.spinics.net/lists/kernel/msg2010409.html
     *
     * ARCv2 only has load-load, store-store and all-all barrier
     * thus need the full all-all barrier
     */
    smp_mb();
}
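
/*
 * C-like model of the LL/SC retry loop above, added here purely as an
 * illustration: LL()/SC() are hypothetical helpers standing in for the
 * llock/scond instructions, not real kernel APIs.
 *
 *  do {
 *      val = LL(&lock->slock);                    // llock: load-linked
 *  } while (val == __ARCH_SPIN_LOCK_LOCKED__      // spin while owned ...
 *           || !SC(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__)); // ... or retry
 *                                                             // if scond lost
 *                                                             // a race
 *  smp_mb();                                      // ACQUIRE barrier
 */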

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    unsigned int val, got_it = 0;

    __asm__ __volatile__(
    "1: llock   %[val], [%[slock]]  \n"
    "   breq    %[val], %[LOCKED], 4f   \n" /* already LOCKED, just bail */
    "   scond   %[LOCKED], [%[slock]]   \n" /* acquire */
    "   bnz 1b          \n"
    "   mov %[got_it], 1        \n"
    "4:                 \n"
    "                   \n"
    : [val]     "=&r"   (val),
      [got_it]  "+&r"   (got_it)
    : [slock]   "r" (&(lock->slock)),
      [LOCKED]  "r" (__ARCH_SPIN_LOCK_LOCKED__)
    : "memory", "cc");

    smp_mb();

    return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    smp_mb();

    WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
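
/*
 * Usage sketch, added for illustration only: kernel code takes these locks
 * through the generic spin_lock()/spin_unlock() wrappers rather than calling
 * the arch_* routines directly, and demo_lock/demo_count are hypothetical
 * names (the __ARCH_SPIN_LOCK_UNLOCKED initializer is assumed to come from
 * asm/spinlock_types.h):
 *
 *  static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *  static int demo_count;
 *
 *  arch_spin_lock(&demo_lock);         // smp_mb() inside gives ACQUIRE
 *  demo_count++;                       // update stays in the critical section
 *  arch_spin_unlock(&demo_lock);       // smp_mb() before the store gives RELEASE
 *
 *  if (arch_spin_trylock(&demo_lock)) {    // 1 - lock taken successfully
 *      demo_count++;
 *      arch_spin_unlock(&demo_lock);
 *  }
 */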

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
    unsigned int val;

    /*
     * zero means writer holds the lock exclusively, deny Reader.
     * Otherwise grant lock to first/subseq reader
     *
     *  if (rw->counter > 0) {
     *      rw->counter--;
     *      ret = 1;
     *  }
     */

    __asm__ __volatile__(
    "1: llock   %[val], [%[rwlock]] \n"
    "   brls    %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
    "   sub %[val], %[val], 1   \n" /* reader lock */
    "   scond   %[val], [%[rwlock]] \n"
    "   bnz 1b          \n"
    "                   \n"
    : [val]     "=&r"   (val)
    : [rwlock]  "r" (&(rw->counter)),
      [WR_LOCKED]   "ir"    (0)
    : "memory", "cc");

    smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
    unsigned int val, got_it = 0;

    __asm__ __volatile__(
    "1: llock   %[val], [%[rwlock]] \n"
    "   brls    %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
    "   sub %[val], %[val], 1   \n" /* counter-- */
    "   scond   %[val], [%[rwlock]] \n"
    "   bnz 1b          \n" /* retry if collided with someone */
    "   mov %[got_it], 1        \n"
    "                   \n"
    "4: ; --- done ---          \n"

    : [val]     "=&r"   (val),
      [got_it]  "+&r"   (got_it)
    : [rwlock]  "r" (&(rw->counter)),
      [WR_LOCKED]   "ir"    (0)
    : "memory", "cc");

    smp_mb();

    return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
    unsigned int val;

    /*
     * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
     * deny writer. Otherwise if unlocked grant to writer
     * Hence the claim that Linux rwlocks are unfair to writers.
     * (can be starved for an indefinite time by readers).
     *
     *  if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
     *      rw->counter = 0;
     *      ret = 1;
     *  }
     */

    __asm__ __volatile__(
    "1: llock   %[val], [%[rwlock]] \n"
    "   brne    %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
    "   mov %[val], %[WR_LOCKED]    \n"
    "   scond   %[val], [%[rwlock]] \n"
    "   bnz 1b          \n"
    "                   \n"
    : [val]     "=&r"   (val)
    : [rwlock]  "r" (&(rw->counter)),
      [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
      [WR_LOCKED]   "ir"    (0)
    : "memory", "cc");

    smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
    unsigned int val, got_it = 0;

    __asm__ __volatile__(
    "1: llock   %[val], [%[rwlock]] \n"
    "   brne    %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
    "   mov %[val], %[WR_LOCKED]    \n"
    "   scond   %[val], [%[rwlock]] \n"
    "   bnz 1b          \n" /* retry if collided with someone */
    "   mov %[got_it], 1        \n"
    "                   \n"
    "4: ; --- done ---          \n"

    : [val]     "=&r"   (val),
      [got_it]  "+&r"   (got_it)
    : [rwlock]  "r" (&(rw->counter)),
      [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
      [WR_LOCKED]   "ir"    (0)
    : "memory", "cc");

    smp_mb();

    return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
    unsigned int val;

    smp_mb();

    /*
     * rw->counter++;
     */
    __asm__ __volatile__(
    "1: llock   %[val], [%[rwlock]] \n"
    "   add %[val], %[val], 1   \n"
    "   scond   %[val], [%[rwlock]] \n"
    "   bnz 1b          \n"
    "                   \n"
    : [val]     "=&r"   (val)
    : [rwlock]  "r" (&(rw->counter))
    : "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
    smp_mb();

    WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
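
/*
 * Worked example of the counter encoding, added for illustration (the
 * concrete unlocked value lives in asm/spinlock_types.h; only its role as
 * "number of free reader slots" matters here):
 *
 *  counter == __ARCH_RW_LOCK_UNLOCKED__    lock is free
 *  arch_read_lock(rw)                      counter = UNLOCKED - 1  (1 reader)
 *  arch_read_lock(rw)                      counter = UNLOCKED - 2  (2 readers)
 *  arch_write_trylock(rw) -> 0             fails: counter != UNLOCKED
 *  arch_read_unlock(rw) twice              counter = UNLOCKED again
 *  arch_write_lock(rw)                     counter = 0 (writer excludes everyone)
 *  arch_read_trylock(rw) -> 0              fails: counter == 0
 *  arch_write_unlock(rw)                   counter = UNLOCKED
 */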

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

    /*
     * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
     * for ACQ and REL semantics respectively. However EX based spinlocks
     * need the extra smp_mb to workaround a hardware quirk.
     */
    smp_mb();

    __asm__ __volatile__(
    "1: ex  %0, [%1]        \n"
    "   breq  %0, %2, 1b    \n"
    : "+&r" (val)
    : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
    : "memory");

    smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

    smp_mb();

    __asm__ __volatile__(
    "1: ex  %0, [%1]        \n"
    : "+r" (val)
    : "r"(&(lock->slock))
    : "memory");

    smp_mb();

    return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

    /*
     * RELEASE barrier: given the instructions avail on ARCv2, full barrier
     * is the only option
     */
    smp_mb();

    /*
     * EX is not really required here, a simple STore of 0 suffices.
     * However this causes tasklist livelocks in SystemC based SMP virtual
     * platforms where the systemc core scheduler uses EX as a cue for
     * moving to next core. Do a git log of this file for details
     */
    __asm__ __volatile__(
    "   ex  %0, [%1]        \n"
    : "+r" (val)
    : "r"(&(lock->slock))
    : "memory");

    /*
     * see pairing version/comment in arch_spin_lock above
     */
    smp_mb();
}
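
/*
 * For reference, an illustrative sketch (not kernel code): the EX based lock
 * above is the classic exchange/test-and-set algorithm, roughly what the
 * following portable C with compiler builtins would express:
 *
 *  void ex_lock(volatile unsigned int *slock)
 *  {
 *      while (__atomic_exchange_n(slock, __ARCH_SPIN_LOCK_LOCKED__,
 *                                 __ATOMIC_ACQUIRE) == __ARCH_SPIN_LOCK_LOCKED__)
 *          ;                               // old value was LOCKED: keep swapping
 *  }
 *
 *  void ex_unlock(volatile unsigned int *slock)
 *  {
 *      __atomic_store_n(slock, __ARCH_SPIN_LOCK_UNLOCKED__, __ATOMIC_RELEASE);
 *  }
 *
 * The real code differs in using EX for the unlock store as well (see the
 * SystemC note above) and full smp_mb() barriers instead of ACQUIRE/RELEASE,
 * to cope with the hardware quirk mentioned in arch_spin_lock().
 */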

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
    int ret = 0;
    unsigned long flags;

    local_irq_save(flags);
    arch_spin_lock(&(rw->lock_mutex));

    /*
     * zero means writer holds the lock exclusively, deny Reader.
     * Otherwise grant lock to first/subseq reader
     */
    if (rw->counter > 0) {
        rw->counter--;
        ret = 1;
    }

    arch_spin_unlock(&(rw->lock_mutex));
    local_irq_restore(flags);

    return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
    int ret = 0;
    unsigned long flags;

    local_irq_save(flags);
    arch_spin_lock(&(rw->lock_mutex));

    /*
     * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
     * deny writer. Otherwise if unlocked grant to writer
     * Hence the claim that Linux rwlocks are unfair to writers.
     * (can be starved for an indefinite time by readers).
     */
    if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
        rw->counter = 0;
        ret = 1;
    }
    arch_spin_unlock(&(rw->lock_mutex));
    local_irq_restore(flags);

    return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
    while (!arch_read_trylock(rw))
        cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
    while (!arch_write_trylock(rw))
        cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
    unsigned long flags;

    local_irq_save(flags);
    arch_spin_lock(&(rw->lock_mutex));
    rw->counter++;
    arch_spin_unlock(&(rw->lock_mutex));
    local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
    unsigned long flags;

    local_irq_save(flags);
    arch_spin_lock(&(rw->lock_mutex));
    rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
    arch_spin_unlock(&(rw->lock_mutex));
    local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */