/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

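/*
 * 32-bit compare-and-swap built directly on the SPARC V9 "cas"
 * instruction: the word at [m] is compared with "old" and, only if the
 * two are equal, "new" is stored there.  Either way, the register
 * holding "new" receives the value the memory location contained
 * before the operation, which is what the caller gets back.
 */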
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
    __asm__ __volatile__("cas [%2], %3, %0"
                 : "=&r" (new)
                 : "0" (new), "r" (m), "r" (old)
                 : "memory");

    return new;
}

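/*
 * 32-bit exchange, implemented as a load/cas retry loop: save "val" in
 * a scratch register, load the current word, then try to cas "val" in.
 * If another CPU changed the word between the lduw and the cas, the
 * comparison fails and the annulled delay slot restores "val" before
 * branching back to retry.  On success, "val" holds the old memory
 * contents, which is the return value.
 */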
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
    unsigned long tmp1, tmp2;

    __asm__ __volatile__(
"   mov     %0, %1\n"
"1: lduw        [%4], %2\n"
"   cas     [%4], %2, %0\n"
"   cmp     %2, %0\n"
"   bne,a,pn    %%icc, 1b\n"
"    mov        %1, %0\n"
    : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
    : "0" (val), "r" (m)
    : "cc", "memory");
    return val;
}

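/*
 * 64-bit variant of the same retry loop, using ldx/casx and the
 * extended condition codes (%xcc) in place of lduw/cas and %icc.
 */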
static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
    unsigned long tmp1, tmp2;

    __asm__ __volatile__(
"   mov     %0, %1\n"
"1: ldx     [%4], %2\n"
"   casx        [%4], %2, %0\n"
"   cmp     %2, %0\n"
"   bne,a,pn    %%xcc, 1b\n"
"    mov        %1, %0\n"
    : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
    : "0" (val), "r" (m)
    : "cc", "memory");
    return val;
}

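/*
 * Size-dispatching front end: arch_xchg() routes to the 16-, 32- or
 * 64-bit helper based on sizeof(*ptr).  Illustrative use (the variable
 * here is hypothetical):
 *
 *     unsigned int flag = 0;
 *     unsigned int prev = arch_xchg(&flag, 1);
 *
 * after which "prev" holds the value "flag" had before the atomic swap.
 */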
#define arch_xchg(ptr,x)                            \
({  __typeof__(*(ptr)) __ret;                   \
    __ret = (__typeof__(*(ptr)))                    \
        __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));  \
    __ret;                              \
})

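/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */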
void __xchg_called_with_bad_pointer(void);

/*
 * Use the 4-byte cas instruction to achieve a 2-byte xchg.  The main
 * logic here is computing the bit shift of the halfword we are
 * interested in; the XOR flips the halfword index to account for the
 * big-endian byte order.
 */
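/*
 * Worked example of the shift: for a short at an address with
 * (m & 2) == 0 the shift is (0 ^ 2) << 3 = 16, selecting the upper
 * half of the containing word, which is where big-endian SPARC keeps
 * the lower-addressed halfword; for (m & 2) == 2 the shift is 0.
 */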
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
    unsigned long maddr = (unsigned long)m;
    int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
    unsigned int mask = 0xffff << bit_shift;
    unsigned int *ptr = (unsigned int *) (maddr & ~2);
    unsigned int old32, new32, load32;

    /* Read the old value */
    load32 = *ptr;

    do {
        old32 = load32;
        new32 = (load32 & (~mask)) | val << bit_shift;
        load32 = __cmpxchg_u32(ptr, old32, new32);
    } while (load32 != old32);

    return (load32 & mask) >> bit_shift;
}

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                       int size)
{
    switch (size) {
    case 2:
        return xchg16(ptr, x);
    case 4:
        return xchg32(ptr, x);
    case 8:
        return xchg64(ptr, x);
    }
    __xchg_called_with_bad_pointer();
    return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>

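/*
 * 64-bit counterpart of __cmpxchg_u32(): casx compares and swaps the
 * full 64-bit word at [m] in a single instruction.
 */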
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
    __asm__ __volatile__("casx [%2], %3, %0"
                 : "=&r" (new)
                 : "0" (new), "r" (m), "r" (old)
                 : "memory");

    return new;
}

/*
 * Use the 4-byte cas instruction to achieve a 1-byte cmpxchg.  The
 * main logic here is computing the bit shift of the byte we are
 * interested in; the XOR flips the byte index to account for the
 * big-endian byte order.
 */
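/*
 * Here the shift is ((m & 3) ^ 3) << 3: the byte at the lowest address
 * in the word gets shift 24 (its most significant byte on big-endian),
 * and the byte at offset 3 gets shift 0.  The loop below splices "old"
 * and "new" into the unchanged neighbouring bytes and retries the word
 * cas as long as only those neighbours changed underneath us; once the
 * target byte itself differs from "old", it is returned as the failure
 * value.
 */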
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
    unsigned long maddr = (unsigned long)m;
    int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
    unsigned int mask = 0xff << bit_shift;
    unsigned int *ptr = (unsigned int *) (maddr & ~3);
    unsigned int old32, new32, load;
    unsigned int load32 = *ptr;

    do {
        new32 = (load32 & ~mask) | (new << bit_shift);
        old32 = (load32 & ~mask) | (old << bit_shift);
        load32 = __cmpxchg_u32(ptr, old32, new32);
        if (load32 == old32)
            return old;
        load = (load32 & mask) >> bit_shift;
    } while (load == old);

    return load;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    switch (size) {
    case 1:
        return __cmpxchg_u8(ptr, old, new);
    case 4:
        return __cmpxchg_u32(ptr, old, new);
    case 8:
        return __cmpxchg_u64(ptr, old, new);
    }
    __cmpxchg_called_with_bad_pointer();
    return old;
}

#define arch_cmpxchg(ptr,o,n)                        \
  ({                                     \
     __typeof__(*(ptr)) _o_ = (o);                   \
     __typeof__(*(ptr)) _n_ = (n);                   \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
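
/*
 * Illustrative use of arch_cmpxchg() (the variable is hypothetical):
 *
 *     int v = 5;
 *     int seen = arch_cmpxchg(&v, 5, 7);
 *
 * If seen == 5 the store happened and v is now 7; any other return
 * value means v did not contain 5 and was left unchanged.
 */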

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

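/*
 * The 4- and 8-byte cases simply reuse the fully atomic __cmpxchg()
 * (which is trivially atomic wrt the current CPU as well); smaller
 * operands fall back to the generic emulation pulled in from
 * asm-generic/cmpxchg-local.h above.
 */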
static inline unsigned long __cmpxchg_local(volatile void *ptr,
                      unsigned long old,
                      unsigned long new, int size)
{
    switch (size) {
    case 4:
    case 8: return __cmpxchg(ptr, old, new, size);
    default:
        return __generic_cmpxchg_local(ptr, old, new, size);
    }

    return old;
}

#define arch_cmpxchg_local(ptr, o, n)                   \
    ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
            (unsigned long)(n), sizeof(*(ptr))))
#define arch_cmpxchg64_local(ptr, o, n)                 \
  ({                                    \
    BUILD_BUG_ON(sizeof(*(ptr)) != 8);              \
    arch_cmpxchg_local((ptr), (o), (n));                    \
  })
#define arch_cmpxchg64(ptr, o, n)   arch_cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */