/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * Many 32bit archs override this with an optimized 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
    *remainder = dividend % divisor;
    return dividend / divisor;
}
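
/*
 * Example usage (a sketch; NSEC_PER_SEC is assumed to come from
 * <linux/time64.h>): split a nanosecond count into whole seconds and
 * leftover nanoseconds:
 *
 *    u32 rem;
 *    u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */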

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
    *remainder = dividend % divisor;
    return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
    *remainder = dividend % divisor;
    return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
    return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
    return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
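    /* do_div() divides its first argument in place and returns the remainder. */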
    *remainder = do_div(dividend, divisor);
    return dividend;
}
#endif

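/*
 * On 32bit the full 64bit divisions below are too large to inline; in the
 * mainline kernel they are provided out of line (see lib/math/div64.c)
 * unless the architecture overrides them.
 */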
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
    u32 remainder;
    return div_u64_rem(dividend, divisor, &remainder);
}
#endif

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
    s32 remainder;
    return div_s64_rem(dividend, divisor, &remainder);
}
#endif

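/*
 * iter_div_u64_rem() divides by iterative subtraction, so it is only
 * sensible when the quotient is expected to be small; it wraps
 * __iter_div_u64_rem() from <vdso/math64.h> (out of line in mainline).
 */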
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
    return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
    return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
    return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
    u32 ah, al;
    u64 ret;

    al = a;
    ah = a >> 32;

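    /*
     * a * mul = ((u64)ah << 32) * mul + (u64)al * mul. The code assumes
     * shift <= 32, so the high-half contribution stays an exact left
     * shift and no bits are lost between the two partial products.
     */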
    ret = mul_u32_u32(al, mul) >> shift;
    if (ah)
        ret += mul_u32_u32(ah, mul) << (32 - shift);

    return ret;
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
    union {
        u64 ll;
        struct {
#ifdef __BIG_ENDIAN
            u32 high, low;
#else
            u32 low, high;
#endif
        } l;
    } rl, rm, rn, rh, a0, b0;
    u64 c;

    a0.ll = a;
    b0.ll = b;

    rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
    rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
    rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
    rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

    /*
     * Each of these lines computes a 64-bit intermediate result into "c",
     * starting at bits 32-95.  The low 32-bits go into the result of the
     * multiplication, the high 32-bits are carried into the next step.
     */
    rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
    rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
    rh.l.high = (c >> 32) + rh.l.high;

    /*
     * The 128-bit result of the multiplication is in rl.ll and rh.ll,
     * shift it right and throw away the high part of the result.
     */
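    /* shift == 0 is handled separately: rh.ll << 64 would be undefined in C. */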
    if (shift == 0)
        return rl.ll;
    if (shift < 64)
        return (rl.ll >> shift) | (rh.ll << (64 - shift));
    return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
    u64 ret;

    /*
     * Extract the sign before the multiplication and put it back
     * afterwards if needed.
     */
    ret = mul_u64_u64_shr(abs(a), b, shift);

    if (a < 0)
        ret = -((s64) ret);

    return ret;
}
#endif /* mul_s64_u64_shr */

#ifndef mul_u64_u32_div
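/*
 * Computes a * mul / divisor via a 64x32->96 bit intermediate product;
 * the quotient is assumed to fit in 64 bits (any higher bits are lost).
 */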
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
    union {
        u64 ll;
        struct {
#ifdef __BIG_ENDIAN
            u32 high, low;
#else
            u32 low, high;
#endif
        } l;
    } u, rl, rh;

    u.ll = a;
    rl.ll = mul_u32_u32(u.l.low, mul);
    rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

    /* Bits 32-63 of the result will be in rh.l.low. */
    rl.l.high = do_div(rh.ll, divisor);

    /* Bits 0-31 of the result will be in rl.l.low. */
    do_div(rl.ll, divisor);

    rl.l.high = rh.l.low;
    return rl.ll;
}
#endif /* mul_u64_u32_div */

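/*
 * Full-precision u64 * u64 / u64: uses a 128-bit intermediate product, so
 * the multiplication cannot overflow before the division (out of line in
 * mainline, see lib/math/div64.c).
 */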
u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

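/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */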
#define DIV64_U64_ROUND_UP(ll, d)   \
    ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)  \
    ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })

/**
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_U64_ROUND_CLOSEST(dividend, divisor)    \
    ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })

/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(   \
{                           \
    s64 __x = (dividend);               \
    s32 __d = (divisor);                \
    ((__x > 0) == (__d > 0)) ?          \
        div_s64((__x + (__d / 2)), __d) :   \
        div_s64((__x - (__d / 2)), __d);    \
}                           \
)
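
/*
 * Illustrative examples: rounding is half away from zero, so
 *
 *    DIV_S64_ROUND_CLOSEST(7, 2)  == 4    (3.5 rounds to 4)
 *    DIV_S64_ROUND_CLOSEST(7, -2) == -4   (-3.5 rounds to -4)
 */
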
#endif /* _LINUX_MATH64_H */