/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS    16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
    raw_spinlock_t lock;
    char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
    [0 ... (NR_LOCKS - 1)] = {
        .lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
    },
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
    unsigned long addr = (unsigned long) v;

    addr >>= L1_CACHE_SHIFT;
    addr ^= (addr >> 8) ^ (addr >> 16);
    return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
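
/*
 * The hash above works as follows: dropping the low L1_CACHE_SHIFT
 * bits means all atomic64_t variables within one cacheline share a
 * lock, the two XORed shifts fold higher-order address bits into
 * the low bits, and the final mask picks one of the NR_LOCKS
 * buckets.  A rough walk-through, assuming a 32-byte cacheline
 * (L1_CACHE_SHIFT == 5) and a made-up address:
 *
 *	v    = (atomic64_t *) 0x12345678;
 *	addr = 0x12345678 >> 5;              // 0x0091a2b3
 *	addr ^= (addr >> 8) ^ (addr >> 16);  // mix in upper bits
 *	lock = &atomic64_lock[addr & 15].lock;
 */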

long long atomic64_read(const atomic64_t *v)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    long long val;

    raw_spin_lock_irqsave(lock, flags);
    val = v->counter;
    raw_spin_unlock_irqrestore(lock, flags);
    return val;
}
EXPORT_SYMBOL(atomic64_read);
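
/*
 * Note that even atomic64_read() takes the lock: on a 32-bit CPU a
 * 64-bit load is normally two separate 32-bit loads, so an unlocked
 * reader could observe half of a concurrent update (a torn value).
 * The same reasoning applies to the plain store in atomic64_set()
 * below.
 */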

void atomic64_set(atomic64_t *v, long long i)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);

    raw_spin_lock_irqsave(lock, flags);
    v->counter = i;
    raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)                       \
void atomic64_##op(long long a, atomic64_t *v)              \
{                                   \
    unsigned long flags;                        \
    raw_spinlock_t *lock = lock_addr(v);                \
                                    \
    raw_spin_lock_irqsave(lock, flags);             \
    v->counter c_op a;                      \
    raw_spin_unlock_irqrestore(lock, flags);            \
}                                   \
EXPORT_SYMBOL(atomic64_##op);
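
/*
 * For illustration, ATOMIC64_OP(add, +=) expands (plus the
 * EXPORT_SYMBOL) to:
 *
 *	void atomic64_add(long long a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 */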

#define ATOMIC64_OP_RETURN(op, c_op)                    \
long long atomic64_##op##_return(long long a, atomic64_t *v)        \
{                                   \
    unsigned long flags;                        \
    raw_spinlock_t *lock = lock_addr(v);                \
    long long val;                          \
                                    \
    raw_spin_lock_irqsave(lock, flags);             \
    val = (v->counter c_op a);                  \
    raw_spin_unlock_irqrestore(lock, flags);            \
    return val;                         \
}                                   \
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)                 \
long long atomic64_fetch_##op(long long a, atomic64_t *v)       \
{                                   \
    unsigned long flags;                        \
    raw_spinlock_t *lock = lock_addr(v);                \
    long long val;                          \
                                    \
    raw_spin_lock_irqsave(lock, flags);             \
    val = v->counter;                       \
    v->counter c_op a;                      \
    raw_spin_unlock_irqrestore(lock, flags);            \
    return val;                         \
}                                   \
EXPORT_SYMBOL(atomic64_fetch_##op);
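
/*
 * The two variants above differ only in their return value:
 * atomic64_<op>_return() returns the counter *after* the operation,
 * while atomic64_fetch_<op>() returns it from *before*.  E.g. with
 * the counter at 5, atomic64_add_return(3, v) yields 8 and
 * atomic64_fetch_add(3, v) yields 5; v holds 8 either way.
 */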

#define ATOMIC64_OPS(op, c_op)                      \
    ATOMIC64_OP(op, c_op)                       \
    ATOMIC64_OP_RETURN(op, c_op)                    \
    ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
/*
 * There are no atomic64_{and,or,xor}_return() operations, so the
 * bitwise ops get only the base and fetch variants.
 */
#define ATOMIC64_OPS(op, c_op)                      \
    ATOMIC64_OP(op, c_op)                       \
    ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    long long val;

    raw_spin_lock_irqsave(lock, flags);
    val = v->counter - 1;
    if (val >= 0)
        v->counter = val;
    raw_spin_unlock_irqrestore(lock, flags);
    return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
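
/*
 * A minimal usage sketch (the structure and helper are hypothetical,
 * not part of this file): drop a reference and free on the final
 * put, without ever letting the count go negative:
 *
 *	if (atomic64_dec_if_positive(&obj->refs) == 0)
 *		free_obj(obj);		// hypothetical helper
 *
 * The return value is the would-be decremented value; it is
 * negative when the counter was already <= 0 and left unchanged.
 */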

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    long long val;

    raw_spin_lock_irqsave(lock, flags);
    val = v->counter;
    if (val == o)
        v->counter = n;
    raw_spin_unlock_irqrestore(lock, flags);
    return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
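
/*
 * A typical compare-and-swap retry loop built on this primitive
 * (a sketch, not part of this file), here doubling the counter:
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(v);
 *		new = old * 2;
 *	} while (atomic64_cmpxchg(v, old, new) != old);
 *
 * Since atomic64_cmpxchg() returns the value it found, the loop
 * exits only when no other CPU updated v between the read and the
 * exchange.
 */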

long long atomic64_xchg(atomic64_t *v, long long new)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    long long val;

    raw_spin_lock_irqsave(lock, flags);
    val = v->counter;
    v->counter = new;
    raw_spin_unlock_irqrestore(lock, flags);
    return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
    unsigned long flags;
    raw_spinlock_t *lock = lock_addr(v);
    int ret = 0;

    raw_spin_lock_irqsave(lock, flags);
    if (v->counter != u) {
        v->counter += a;
        ret = 1;
    }
    raw_spin_unlock_irqrestore(lock, flags);
    return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
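
/*
 * atomic64_add_unless() is the building block for the common
 * inc-not-zero pattern, which the generic headers implement along
 * the lines of:
 *
 *	#define atomic64_inc_not_zero(v)  atomic64_add_unless((v), 1LL, 0LL)
 */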