/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)           (U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test-and-modify operations return 0 if
 * the bit was clear before the operation and a nonzero value otherwise.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */
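
/*
 * Illustrative example (editorial addition, not in the original header):
 * with this numbering, bit 70 of a bitmap lives in
 * addr[70 >> _BITOPS_LONG_SHIFT] at position 70 & (BITS_PER_LONG - 1),
 * i.e. addr[1], bit 6 on a 64-bit kernel, and addr[2], bit 6 on 32-bit.
 */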

#define RLONG_ADDR(x)            "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)           "+m" (*(volatile char *) (x))

#define ADDR                RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)   WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)          (1 << ((nr) & 7))
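
/*
 * Worked example (editorial addition, not in the original header): for
 * nr == 13, CONST_MASK_ADDR(13, addr) names the byte at
 * (void *)addr + (13 >> 3), i.e. addr plus one byte, and
 * CONST_MASK(13) == 1 << (13 & 7) == 0x20.  Since x86 is little-endian,
 * bit 13 of the long is bit 5 of that second byte.
 */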

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
    if (__builtin_constant_p(nr)) {
        asm volatile(LOCK_PREFIX "orb %b1,%0"
            : CONST_MASK_ADDR(nr, addr)
            : "iq" (CONST_MASK(nr))
            : "memory");
    } else {
        asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
            : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
    }
}
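
/*
 * Illustrative note (editorial addition, not in the original header):
 * with a compile-time constant nr, arch_set_bit(13, addr) reduces to a
 * single byte-wide "lock orb $0x20,1(%rdi)" (assuming addr arrives in
 * %rdi).  With a runtime nr it emits "lock bts %rsi,(%rdi)"; a register
 * bit offset lets BTS write memory beyond the long at addr, which is why
 * the address is passed as the RLONG_ADDR() "m" input and the store is
 * described by the "memory" clobber rather than a "+m" output.
 */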

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
    asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
    if (__builtin_constant_p(nr)) {
        asm volatile(LOCK_PREFIX "andb %b1,%0"
            : CONST_MASK_ADDR(nr, addr)
            : "iq" (~CONST_MASK(nr)));
    } else {
        asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
            : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
    }
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
    barrier();
    arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
    bool negative;
    asm volatile(LOCK_PREFIX "andb %2,%1"
        CC_SET(s)
        : CC_OUT(s) (negative), WBYTE_ADDR(addr)
        : "ir" ((char) ~(1 << nr)) : "memory");
    return negative;
}
#define arch_clear_bit_unlock_is_negative_byte                                 \
    arch_clear_bit_unlock_is_negative_byte

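/*
 * Illustrative note (editorial addition, not in the original header):
 * the "lock andb" above clears bit nr (which must lie in the first byte
 * at addr, so nr is 0..7) and returns SF, i.e. whether bit 7 of that
 * byte is still set afterwards.  For example, a byte of 0x81 with
 * nr == 0 becomes 0x80 and the function returns true.  The intended
 * user is the page-unlock fast path, where PG_locked and PG_waiters
 * share a byte with PG_waiters at bit 7.
 */
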
static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
    arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
    asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
    if (__builtin_constant_p(nr)) {
        asm volatile(LOCK_PREFIX "xorb %b1,%0"
            : CONST_MASK_ADDR(nr, addr)
            : "iq" (CONST_MASK(nr)));
    } else {
        asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
            : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
    }
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
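
/*
 * Illustrative note (editorial addition, not in the original header):
 * GEN_BINARY_RMWcc() comes from <asm/rmwcc.h> and expands to roughly
 *
 *     bool oldbit;
 *     asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %2,%0" CC_SET(c)
 *                  : "+m" (*addr), CC_OUT(c) (oldbit)
 *                  : "Ir" (nr));
 *     return oldbit;
 *
 * (or an asm-goto form on older toolchains), so the function returns the
 * previous value of the bit via the carry flag.
 */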

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
    return arch_test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
    bool oldbit;

    asm(__ASM_SIZE(bts) " %2,%1"
        CC_SET(c)
        : CC_OUT(c) (oldbit)
        : ADDR, "Ir" (nr) : "memory");
    return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    bool oldbit;

    asm volatile(__ASM_SIZE(btr) " %2,%1"
             CC_SET(c)
             : CC_OUT(c) (oldbit)
             : ADDR, "Ir" (nr) : "memory");
    return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
    bool oldbit;

    asm volatile(__ASM_SIZE(btc) " %2,%1"
             CC_SET(c)
             : CC_OUT(c) (oldbit)
             : ADDR, "Ir" (nr) : "memory");

    return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
    return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
    return ((1UL << (nr & (BITS_PER_LONG-1))) &
        (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
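
/*
 * Worked example (editorial addition, not in the original header): on a
 * 64-bit kernel, constant_test_bit(70, addr) evaluates
 *
 *     (1UL << (70 & 63)) & addr[70 >> 6]  ==  (1UL << 6) & addr[1]
 *
 * i.e. it tests bit 6 of the second long, with all of the indexing
 * arithmetic folded at compile time.
 */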

static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
    bool oldbit;

    asm volatile("testb %2,%1"
             CC_SET(nz)
             : CC_OUT(nz) (oldbit)
             : "m" (((unsigned char *)addr)[nr >> 3]),
               "i" (1 << (nr & 7))
             :"memory");

    return oldbit;
}
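
/*
 * Worked example (editorial addition, not in the original header): for
 * nr == 13 this becomes "testb $0x20,1(%rdi)" (assuming addr arrives in
 * %rdi): the byte at offset 13 >> 3 == 1 is tested against
 * 1 << (13 & 7) == 0x20, and oldbit is set when ZF is clear.  Plain x86
 * loads already have acquire semantics, and the asm volatile plus
 * "memory" clobber keep the compiler from reordering around it.
 */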

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
    bool oldbit;

    asm volatile(__ASM_SIZE(bt) " %2,%1"
             CC_SET(c)
             : CC_OUT(c) (oldbit)
             : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

    return oldbit;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
    return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
                      variable_test_bit(nr, addr);
}

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
    return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
                      variable_test_bit(nr, addr);
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
    asm("rep; bsf %1,%0"
        : "=r" (word)
        : "rm" (word));
    return word;
}
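
/*
 * Illustrative examples (editorial addition, not in the original header):
 * __ffs(0x18) == 3 and __ffs(1) == 0.  "rep; bsf" is the TZCNT encoding;
 * CPUs without BMI1 ignore the REP prefix and execute a plain BSF, which
 * gives the same answer for any nonzero input.
 */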

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
    asm("rep; bsf %1,%0"
        : "=r" (word)
        : "r" (~word));
    return word;
}
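
/*
 * Illustrative example (editorial addition, not in the original header):
 * ffz(0x0f) == 4, since bit 4 is the lowest clear bit of 0x0f; the
 * implementation simply runs BSF over ~word.
 */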

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
    asm("bsr %1,%0"
        : "=r" (word)
        : "rm" (word));
    return word;
}
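
/*
 * Illustrative example (editorial addition, not in the original header):
 * __fls(0x18) == 4, the index of the highest set bit; for nonzero input
 * this equals BITS_PER_LONG - 1 - __builtin_clzl(word).
 */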

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
    int r;

#ifdef CONFIG_X86_64
    /*
     * AMD64 documents that BSFL won't clobber the dest reg if x==0; Intel64
     * documents the dest reg as undefined if x==0, but Intel's CPU architects
     * say it is written back with its previous value, except that the
     * top 32 bits will be cleared.
     *
     * We cannot do this on 32 bits because at the very least some
     * 486 CPUs did not behave this way.
     */
    asm("bsfl %1,%0"
        : "=r" (r)
        : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
    asm("bsfl %1,%0\n\t"
        "cmovzl %2,%0"
        : "=&r" (r) : "rm" (x), "r" (-1));
#else
    asm("bsfl %1,%0\n\t"
        "jnz 1f\n\t"
        "movl $-1,%0\n"
        "1:" : "=r" (r) : "rm" (x));
#endif
    return r + 1;
}
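
/*
 * Illustrative examples (editorial addition, not in the original header):
 * ffs(0) == 0, ffs(0x10) == 5 and ffs(-1) == 1.  All three code paths
 * above arrange for r to be -1 when x == 0, so the final "r + 1" yields 0.
 */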

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
    int r;

#ifdef CONFIG_X86_64
    /*
     * AMD64 documents that BSRL won't clobber the dest reg if x==0; Intel64
     * documents the dest reg as undefined if x==0, but Intel's CPU architects
     * say it is written back with its previous value, except that the
     * top 32 bits will be cleared.
     *
     * We cannot do this on 32 bits because at the very least some
     * 486 CPUs did not behave this way.
     */
    asm("bsrl %1,%0"
        : "=r" (r)
        : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
    asm("bsrl %1,%0\n\t"
        "cmovzl %2,%0"
        : "=&r" (r) : "rm" (x), "rm" (-1));
#else
    asm("bsrl %1,%0\n\t"
        "jnz 1f\n\t"
        "movl $-1,%0\n"
        "1:" : "=r" (r) : "rm" (x));
#endif
    return r + 1;
}
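
/*
 * Illustrative examples (editorial addition, not in the original header):
 * fls(0) == 0, fls(1) == 1 and fls(0x80000000) == 32, mirroring the ffs()
 * trick of forcing r to -1 for a zero argument before adding 1.
 */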

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
    int bitpos = -1;
    /*
     * AMD64 documents that BSRQ won't clobber the dest reg if x==0; Intel64
     * documents the dest reg as undefined if x==0, but Intel's CPU architects
     * say it is written back with its previous value.
     */
    asm("bsrq %1,%q0"
        : "+r" (bitpos)
        : "rm" (x));
    return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
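
/*
 * Illustrative examples (editorial addition, not in the original header):
 * fls64(0) == 0 (bitpos stays -1 because BSRQ leaves the destination
 * untouched for a zero source, as noted above), fls64(1) == 1 and
 * fls64(1ULL << 63) == 64.
 */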

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */