#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>  /* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)      ((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
    lock->slock = 0;
    barrier();
}

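/*
 * Unconditionally take the lock; report success (non-zero) only if it
 * was still unlocked beforehand, i.e. slock was > 0.
 */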
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
    char oldval = lock->slock;

    lock->slock = 0;
    barrier();

    return oldval > 0;
}

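/*
 * Release: the barrier comes first so that accesses inside the critical
 * section cannot be moved below the store that marks the lock free.
 */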
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    barrier();
    lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)        do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)       do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)     ({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)    ({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)      do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)     do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
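/*
 * Non-debug case: with a single CPU, and preemption handled by the
 * generic spin_lock() layer, there is nothing to spin on, so the ops
 * reduce to compiler barriers. (void)(lock) still evaluates the
 * argument, and trylock always reports success.
 */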
#define arch_spin_is_locked(lock)   ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)       do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)     do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)    ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

#define arch_spin_is_contended(lock)    (((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */
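
The stand-alone sketch below illustrates the inverted debug convention described in the header comment (1 = unlocked, 0 = locked). It is not part of the kernel header: mock_spinlock_t, mock_barrier() and the mock_spin_* helpers are user-space stand-ins for arch_spinlock_t, barrier() and the arch_spin_* operations, but their bodies mirror the CONFIG_DEBUG_SPINLOCK versions above.

#include <assert.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's arch_spinlock_t and barrier(). */
typedef struct { volatile char slock; } mock_spinlock_t;
#define mock_barrier()  __asm__ __volatile__("" : : : "memory")

/* Same inverted convention as the debug case above: 1 = unlocked, 0 = locked. */
#define mock_spin_is_locked(x)  ((x)->slock == 0)

static void mock_spin_lock(mock_spinlock_t *lock)
{
    lock->slock = 0;
    mock_barrier();
}

static int mock_spin_trylock(mock_spinlock_t *lock)
{
    char oldval = lock->slock;

    lock->slock = 0;
    mock_barrier();

    return oldval > 0;      /* non-zero only if it was unlocked before */
}

static void mock_spin_unlock(mock_spinlock_t *lock)
{
    mock_barrier();
    lock->slock = 1;
}

int main(void)
{
    mock_spinlock_t lock = { .slock = 1 };  /* properly initialized: unlocked */

    assert(!mock_spin_is_locked(&lock));
    assert(mock_spin_trylock(&lock));       /* first attempt succeeds */
    assert(mock_spin_is_locked(&lock));
    assert(!mock_spin_trylock(&lock));      /* already held: trylock fails */

    mock_spin_unlock(&lock);
    assert(!mock_spin_is_locked(&lock));

    mock_spin_lock(&lock);                  /* unconditional lock */
    assert(mock_spin_is_locked(&lock));
    mock_spin_unlock(&lock);

    printf("debug UP spinlock semantics hold\n");
    return 0;
}

Note that a zero-initialized lock reads as locked under this scheme, so a forgotten initializer shows up immediately; that is the initialization-bug trap the header comment refers to.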