/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

static inline void arch_atomic_set(atomic_t *v, int i)
{
    /*
     * Independent of hardware support, all of the atomic_xxx() APIs need
     * to follow the same locking rules to make sure that a "hardware"
     * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
     * sequence
     *
     * Thus atomic_set() despite being 1 insn (and seemingly atomic)
     * requires the locking.
     */
    unsigned long flags;

    atomic_ops_lock(flags);
    WRITE_ONCE(v->counter, i);
    atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i)   arch_atomic_set((v), (i))

#define ATOMIC_OP(op, c_op, asm_op)                 \
static inline void arch_atomic_##op(int i, atomic_t *v)         \
{                                   \
    unsigned long flags;                        \
                                    \
    atomic_ops_lock(flags);                     \
    v->counter c_op i;                      \
    atomic_ops_unlock(flags);                   \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)              \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)     \
{                                   \
    unsigned long flags;                        \
    unsigned int temp;                      \
                                    \
    /*                              \
     * spin lock/unlock provides the needed smp_mb() before/after   \
     */                             \
    atomic_ops_lock(flags);                     \
    temp = v->counter;                      \
    temp c_op i;                            \
    v->counter = temp;                      \
    atomic_ops_unlock(flags);                   \
                                    \
    return temp;                            \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)               \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)        \
{                                   \
    unsigned long flags;                        \
    unsigned int orig;                      \
                                    \
    /*                              \
     * spin lock/unlock provides the needed smp_mb() before/after   \
     */                             \
    atomic_ops_lock(flags);                     \
    orig = v->counter;                      \
    v->counter c_op i;                      \
    atomic_ops_unlock(flags);                   \
                                    \
    return orig;                            \
}

#define ATOMIC_OPS(op, c_op, asm_op)                    \
    ATOMIC_OP(op, c_op, asm_op)                 \
    ATOMIC_OP_RETURN(op, c_op, asm_op)              \
    ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                    \
    ATOMIC_OP(op, c_op, asm_op)                 \
    ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot      arch_atomic_andnot
#define arch_atomic_fetch_andnot    arch_atomic_fetch_andnot

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif
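
The header assumes atomic_ops_lock()/atomic_ops_unlock() are supplied elsewhere by the platform, as described in the top comment: irq-disabling only on UP, a spinlock (plus irq-disabling) on SMP. Below is a minimal sketch of what such backends might look like; the lock variable name is hypothetical and the real ARC definitions live in a separate header, so treat this as illustration rather than the actual code.

#ifdef CONFIG_SMP
/*
 * Illustrative SMP backend: one global spinlock serialises every
 * emulated atomic sequence, with IRQs disabled so the holder cannot
 * be interrupted locally mid-sequence.  (Hypothetical lock name.)
 */
static arch_spinlock_t emulated_atomic_lock = __ARCH_SPIN_LOCK_UNLOCKED;

#define atomic_ops_lock(flags)      do {            \
    local_irq_save(flags);                          \
    arch_spin_lock(&emulated_atomic_lock);          \
} while (0)

#define atomic_ops_unlock(flags)    do {            \
    arch_spin_unlock(&emulated_atomic_lock);        \
    local_irq_restore(flags);                       \
} while (0)

#else /* UP */
/* Illustrative UP backend: disabling interrupts is enough; nothing else can interleave. */
#define atomic_ops_lock(flags)      local_irq_save(flags)
#define atomic_ops_unlock(flags)    local_irq_restore(flags)
#endif

Whatever backend is actually in place has to give the ordering the _return/_fetch comments in the header rely on ("spin lock/unlock provides the needed smp_mb() before/after").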
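
To see why arch_atomic_set() must take the same lock as the emulated read-modify-write sequences, here is a small user-space analogue using pthreads. All names are made up for illustration and this is not kernel code; the point is that a bare store which skips the lock can land between the load and the store-back of an emulated add and be silently overwritten.

/*
 * The lost-update interleaving a bare store allows:
 *
 *   CPU0 (emulated add)              CPU1 (bare set)
 *   lock
 *   tmp = counter      // reads 5
 *                                    counter = 100   // no lock taken
 *   counter = tmp + 1  // writes 6; the set to 100 is silently lost
 *   unlock
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void emulated_add(int i)     /* stand-in for ATOMIC_OP(add, ...) */
{
    pthread_mutex_lock(&ops_lock);
    int tmp = counter;              /* load   */
    tmp += i;                       /* modify */
    counter = tmp;                  /* store  */
    pthread_mutex_unlock(&ops_lock);
}

static void emulated_set(int i)     /* stand-in for arch_atomic_set() */
{
    pthread_mutex_lock(&ops_lock);  /* same lock as the RMW ops */
    counter = i;
    pthread_mutex_unlock(&ops_lock);
}

static void *adder(void *arg)
{
    (void)arg;
    for (int n = 0; n < 100000; n++)
        emulated_add(1);
    return NULL;
}

int main(void)
{
    pthread_t t;

    emulated_set(5);
    pthread_create(&t, NULL, adder, NULL);
    pthread_join(t, NULL);
    printf("counter = %d\n", counter);  /* always 100005 with the shared lock */
    return 0;
}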
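
For reference, this is roughly what ATOMIC_OPS(add, +=, add) generates once the three generator macros are expanded, i.e. the macro bodies with op=add and c_op=+= substituted. Note that the asm_op argument is not referenced by these spinlock-based bodies.

/* Expansion sketch of ATOMIC_OPS(add, +=, add) -- illustrative only. */

static inline void arch_atomic_add(int i, atomic_t *v)
{
    unsigned long flags;

    atomic_ops_lock(flags);
    v->counter += i;
    atomic_ops_unlock(flags);
}

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
    unsigned long flags;
    unsigned int temp;

    atomic_ops_lock(flags);
    temp = v->counter;
    temp += i;
    v->counter = temp;
    atomic_ops_unlock(flags);

    return temp;        /* value after the add */
}

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
    unsigned long flags;
    unsigned int orig;

    atomic_ops_lock(flags);
    orig = v->counter;
    v->counter += i;
    atomic_ops_unlock(flags);

    return orig;        /* value before the add */
}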