// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

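/*
 * Hash the address of the atomic64_t (at cacheline granularity) to
 * pick the spinlock that protects it.
 */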
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

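/*
 * Even plain reads and writes must take the lock: without native 64-bit
 * atomic instructions, a concurrent update could otherwise be observed
 * half-done (a torn load or store of the two 32-bit halves).
 */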
s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

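/*
 * Generate the arithmetic and bitwise operations.  ATOMIC64_OP() emits
 * the void form, ATOMIC64_OP_RETURN() the form that returns the new
 * value, and ATOMIC64_FETCH_OP() the form that returns the old value;
 * each performs its update under the hashed spinlock.
 */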
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

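/*
 * The bitwise ops (and, or, xor) only provide the void and fetch_
 * variants; there is no *_return form for them in the atomic64 API.
 */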
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

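/*
 * Decrement the counter only if the result stays non-negative.  The
 * decremented value is returned even when it is not stored.
 */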
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

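/*
 * Set the counter to @n only if it currently equals @o; the value
 * observed before any update is returned.
 */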
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

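/*
 * Unconditionally replace the counter with @new and return the old value.
 */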
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

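/*
 * Add @a to the counter unless it currently equals @u; the value seen
 * before the (possible) addition is returned.
 */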
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);