#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

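/*
 * atomic_t (and the plain word/bit helpers below) are emulated with
 * spinlocks: every operation takes the lock that hashes from the target
 * address, so concurrent read-modify-write sequences on the same word
 * serialize against each other.  On SMP a small hash of locks keeps
 * unrelated addresses from contending; on UP a single dummy lock is
 * enough, since spin_lock_irqsave() then amounts to disabling IRQs.
 */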
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)	(&dummy)

#endif /* SMP */

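/*
 * ATOMIC_FETCH_OP(op, c_op) generates arch_atomic_fetch_<op>(): it returns
 * the previous value of v->counter and then applies "v->counter c_op i",
 * all under the per-address lock.  For example, ATOMIC_FETCH_OP(add, +=)
 * expands to arch_atomic_fetch_add().
 */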
#define ATOMIC_FETCH_OP(op, c_op) \
int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        int ret; \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
        ret = v->counter; \
        v->counter c_op i; \
 \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
        return ret; \
} \
EXPORT_SYMBOL(arch_atomic_fetch_##op);

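/*
 * ATOMIC_OP_RETURN(op, c_op) generates arch_atomic_<op>_return(): it applies
 * "v->counter c_op i" and returns the new value.
 */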
#define ATOMIC_OP_RETURN(op, c_op) \
int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        int ret; \
        unsigned long flags; \
        spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
        ret = (v->counter c_op i); \
 \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
        return ret; \
} \
EXPORT_SYMBOL(arch_atomic_##op##_return);

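/* Generate the concrete helpers: add_return plus the fetch_{add,and,or,xor} variants. */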
ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

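/* xchg: unconditionally store @new and return the previous value. */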
int arch_atomic_xchg(atomic_t *v, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_xchg);

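/*
 * cmpxchg: store @new only if the current value equals @old.  Either way,
 * return the value that was found, so callers can tell whether the swap
 * took place.
 */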
int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;

        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_cmpxchg);

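/* Add @a to @v unless @v already equals @u; return the value seen before the add. */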
int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(arch_atomic_fetch_add_unless);

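/*
 * atomic_set() must take the lock too: a plain store could land in the
 * middle of one of the locked read-modify-write sequences above and have
 * its value silently overwritten.
 */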
void arch_atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(arch_atomic_set);

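/*
 * Bit helpers: set, clear, or toggle the bits in @mask within *@addr and
 * return the previous state of those bits (non-zero if any was set), all
 * under the address-hashed lock.
 */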
unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old | mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___set_bit);

unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old & ~mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___clear_bit);

unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old ^ mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(sp32___change_bit);

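/* cmpxchg on an arbitrary u32: same scheme as arch_atomic_cmpxchg() above. */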
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

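/*
 * 64-bit cmpxchg.  The whole read-compare-store sequence runs under one
 * lock, so it is atomic with respect to everything else that goes through
 * these helpers.
 */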
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
        unsigned long flags;
        u64 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);

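/* xchg on an arbitrary u32: unconditionally store @new, return the old value. */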
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        prev = *ptr;
        *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);