/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
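
/*
 * Illustrative sketch only, not part of the file: each atomic
 * operation below compiles to a load-locked/store-conditional retry
 * loop.  In hypothetical C, set_bit() behaves roughly like:
 *
 *	do {
 *		temp = load_locked(m);			// ldl_l
 *		temp |= mask;				// bis (or bic/xor)
 *	} while (!store_conditional(m, temp));		// stl_c, 0 on failure
 *
 * load_locked() and store_conditional() are made-up names standing in
 * for the ldl_l and stl_c instructions.  The retry branch is placed in
 * .subsection 2 so the common path falls straight through.
 */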

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
    "1: ldl_l %0,%3\n"
    "   bis %0,%2,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,2f\n"
    ".subsection 2\n"
    "2: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m)
    :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
    int *m = ((int *) addr) + (nr >> 5);

    *m |= 1 << (nr & 31);
}
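
/*
 * Note that, like the atomic versions, the non-atomic arch___*_bit()
 * helpers operate on 32-bit longwords (nr >> 5 indexes an int), so
 * the plain read-modify-write is safe only while the caller excludes
 * all other writers of that longword, not just of the single bit.
 */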

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
    "1: ldl_l %0,%3\n"
    "   bic %0,%2,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,2f\n"
    ".subsection 2\n"
    "2: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m)
    :"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
    smp_mb();
    clear_bit(nr, addr);
}
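
/*
 * The smp_mb() ahead of the clear gives clear_bit_unlock() release
 * semantics: every access issued before the unlock is ordered before
 * other CPUs can observe the bit as clear.
 */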

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    int *m = ((int *) addr) + (nr >> 5);

    *m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
    smp_mb();
    arch___clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
    "1: ldl_l %0,%3\n"
    "   xor %0,%2,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,2f\n"
    ".subsection 2\n"
    "2: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m)
    :"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
    int *m = ((int *) addr) + (nr >> 5);

    *m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
    unsigned long oldbit;
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    "1: ldl_l %0,%4\n"
    "   and %0,%3,%2\n"
    "   bne %2,2f\n"
    "   xor %0,%3,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,3f\n"
    "2:\n"
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    ".subsection 2\n"
    "3: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
    :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

    return oldbit != 0;
}
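
/*
 * On SMP the sequence above is bracketed by mb, making
 * test_and_set_bit() a full barrier on both sides.  Note the early
 * exit: when the bit is already set ("bne %2,2f") the store-conditional
 * is skipped entirely, avoiding a needless dirtying of the cache line.
 */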

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
    unsigned long oldbit;
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
    "1: ldl_l %0,%4\n"
    "   and %0,%3,%2\n"
    "   bne %2,2f\n"
    "   xor %0,%3,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,3f\n"
    "2:\n"
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    ".subsection 2\n"
    "3: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
    :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

    return oldbit != 0;
}
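
/*
 * The _lock variant is identical except that the leading mb is
 * omitted: acquiring a lock needs only the trailing barrier, which
 * keeps accesses in the critical section from moving ahead of the
 * successful stl_c.
 */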

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = 1 << (nr & 0x1f);
    int *m = ((int *) addr) + (nr >> 5);
    int old = *m;

    *m = old | mask;
    return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
    unsigned long oldbit;
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    "1: ldl_l %0,%4\n"
    "   and %0,%3,%2\n"
    "   beq %2,2f\n"
    "   xor %0,%3,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,3f\n"
    "2:\n"
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    ".subsection 2\n"
    "3: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
    :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

    return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = 1 << (nr & 0x1f);
    int *m = ((int *) addr) + (nr >> 5);
    int old = *m;

    *m = old & ~mask;
    return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
    unsigned long oldbit;
    unsigned long temp;
    int *m = ((int *) addr) + (nr >> 5);

    __asm__ __volatile__(
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    "1: ldl_l %0,%4\n"
    "   and %0,%3,%2\n"
    "   xor %0,%3,%0\n"
    "   stl_c %0,%1\n"
    "   beq %0,3f\n"
#ifdef CONFIG_SMP
    "   mb\n"
#endif
    ".subsection 2\n"
    "3: br 1b\n"
    ".previous"
    :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
    :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

    return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
    unsigned long mask = 1 << (nr & 0x1f);
    int *m = ((int *) addr) + (nr >> 5);
    int old = *m;

    *m = old ^ mask;
    return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
    unsigned long sum, x1, x2, x4;

    x = ~x & -~x;       /* set first 0 bit, clear others */
    x1 = x & 0xAA;
    x2 = x & 0xCC;
    x4 = x & 0xF0;
    sum = x2 ? 2 : 0;
    sum += (x4 != 0) * 4;
    sum += (x1 != 0);

    return sum;
}
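
/*
 * Worked example: ffz_b(0xE7).  ~x & -~x isolates the lowest clear bit
 * as a one-hot value, here 0x08 (bit 3).  Then 0x08 & 0xCC != 0
 * contributes 2, 0x08 & 0xF0 == 0 contributes nothing, and
 * 0x08 & 0xAA != 0 contributes 1: sum = 3, the index of the first zero
 * bit in 0xE7 (binary 11100111).
 */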

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
    /* Whee.  EV67 can calculate it directly.  */
    return __kernel_cttz(~word);
#else
    unsigned long bits, qofs, bofs;

    bits = __kernel_cmpbge(word, ~0UL);
    qofs = ffz_b(bits);
    bits = __kernel_extbl(word, qofs);
    bofs = ffz_b(bits);

    return qofs*8 + bofs;
#endif
}
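
/*
 * How the non-EV67 path works: cmpbge(word, ~0UL) sets bit i of the
 * result iff byte i of word is 0xff, so ffz_b() on that mask yields
 * the number of the first byte containing a zero bit (qofs).  extbl
 * then extracts that byte and a second ffz_b() finds the zero bit
 * within it.
 */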

/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
    /* Whee.  EV67 can calculate it directly.  */
    return __kernel_cttz(word);
#else
    unsigned long bits, qofs, bofs;

    bits = __kernel_cmpbge(0, word);
    qofs = ffz_b(bits);
    bits = __kernel_extbl(word, qofs);
    bofs = ffz_b(~bits);

    return qofs*8 + bofs;
#endif
}
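
/*
 * Same structure as ffz(): cmpbge(0, word) sets bit i iff byte i of
 * word is zero, so the first clear bit of that mask marks the first
 * nonzero byte; within the extracted byte, ffz_b(~bits) finds the
 * first set bit.
 */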

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
    int result = __ffs(word) + 1;
    return word ? result : 0;
}
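
/*
 * __ffs(word) is undefined for word == 0, but it is evaluated
 * unconditionally here; the trailing "word ? result : 0" discards the
 * garbage result, presumably so the select can become a conditional
 * move rather than a branch.
 */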

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
    return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
    unsigned long t, a, r;

    t = __kernel_cmpbge (x, 0x0101010101010101UL);
    a = __flsm1_tab[t];
    t = __kernel_extbl (x, a);
    r = a*8 + __flsm1_tab[t] + (x != 0);

    return r;
}
#endif
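
/*
 * Worked example for the table-driven fls64() above, assuming
 * __flsm1_tab[t] holds fls(t) - 1 for each byte value t: with
 * x = 0x40, cmpbge flags byte 0 as the only nonzero byte, so
 * a = __flsm1_tab[0x01] = 0; extbl yields the byte 0x40, and
 * r = 0*8 + __flsm1_tab[0x40] + 1 = 6 + 1 = 7, matching fls64(0x40).
 */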

static inline unsigned long __fls(unsigned long x)
{
    return fls64(x) - 1;
}

static inline int fls(unsigned int x)
{
    return fls64(x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
    return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
    return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
    return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
    return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is cleared.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
    unsigned long b0, b1, ofs, tmp;

    b0 = b[0];
    b1 = b[1];
    ofs = (b0 ? 0 : 64);
    tmp = (b0 ? b0 : b1);

    return __ffs(tmp) + ofs;
}
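
/*
 * Example: with b[0] == 0 and b[1] == 0x10 the selects pick ofs = 64
 * and tmp = b[1], giving __ffs(0x10) + 64 == 68.  Both selects are
 * simple enough that the compiler can likely emit conditional moves
 * instead of branches.
 */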

#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */