/* spinlock.h: 32-bit SPARC spinlock and rwlock implementation. */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

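/*
 * Acquire: ldstub atomically reads the lock byte into %g2 and stores
 * 0xff.  A non-zero result means the lock was already held, so the
 * out-of-line path in .subsection 2 spins with plain loads until the
 * byte reads zero, then branches back to retry the ldstub.
 */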
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub [%0], %%g2\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2f\n\t"
	" ldub [%0], %%g2\n\t"
	".subsection 2\n"
	"2:\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2b\n\t"
	" ldub [%0], %%g2\n\t"
	"b,a 1b\n\t"
	".previous\n"
	:
	: "r" (lock)
	: "g2", "memory", "cc");
}

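/*
 * Trylock: one ldstub; the old value is zero only if we are the ones
 * who set the lock byte, so return 1 on success, 0 otherwise.
 */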
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

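/*
 * Release: a plain byte store of zero.  The "memory" clobber keeps the
 * compiler from sinking critical-section accesses below the store.
 */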
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

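/*
 * Read-write spinlocks.  The 32-bit lock word holds a reader count in
 * its upper 24 bits and a one-byte writer lock in the low byte, which
 * on big-endian SPARC sits at offset 3 and is the byte grabbed with
 * ldstub below.  The counter updates are done by out-of-line assembly
 * helpers (___rw_read_enter and friends), entered with the lock pointer
 * in %g1, the ldstub result in %g2 and the return address saved in %g4.
 */
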
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	:
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

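/*
 * The helper transiently owns the writer byte while it updates the
 * reader count, so interrupts are disabled around it: an interrupt on
 * this CPU that tried to take the same lock during that window would
 * spin forever.
 */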
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

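/*
 * Drop one reader: same calling convention as above, but the helper
 * decrements the reader count.
 */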
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	:
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

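/*
 * Write lock: ___rw_write_enter returns once it holds the writer byte
 * with no readers left; the caller then sets the whole word to ~0 to
 * mark the lock as writer-held.
 */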
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	:
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

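/* Write unlock: clear the whole lock word, counter and writer byte. */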
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
	" st %%g0, [%0]"
	:
	: "r" (lock)
	: "memory");
}

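/*
 * Write trylock: grab the writer byte with ldstub.  If the byte was
 * free but readers are still counted in the upper bits, back off by
 * clearing the byte; otherwise mark the whole word writer-held.
 */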
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

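/*
 * Read trylock: ___rw_read_try leaves its result in %o0.  The irq
 * handling is supplied by the arch_read_trylock() wrapper below.
 */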
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_try\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */