/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

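/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */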
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

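/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */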
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

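/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */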
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}

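/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */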
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

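/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */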
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
        return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
        *remainder = do_div(dividend, divisor);
        return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

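/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Return: dividend / divisor
 */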
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
}
#endif

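/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */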
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
        s32 remainder;
        return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
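/* Multiply two u32 values and return the full 64-bit product. */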
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
        return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
        u32 ah, al;
        u64 ret;

        al = a;
        ah = a >> 32;

        ret = mul_u32_u32(al, mul) >> shift;
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}
#endif

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } rl, rm, rn, rh, a0, b0;
        u64 c;

        a0.ll = a;
        b0.ll = b;

        rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
        rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
        rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
        rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

        /*
         * Each of these lines computes a 64-bit intermediate result into "c",
         * starting at bits 32-95: the low 32 bits go into the corresponding
         * word of the result, the high 32 bits are carried into the next step.
         */
        rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
        rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
        rh.l.high = (c >> 32) + rh.l.high;

        /*
         * The 128-bit product is now in rl.ll (low half) and rh.ll (high
         * half); shift it right and discard the bits shifted out.
         */
        if (shift == 0)
                return rl.ll;
        if (shift < 64)
                return (rl.ll >> shift) | (rh.ll << (64 - shift));
        return rh.ll >> (shift & 63);
}
#endif

#endif

#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
        u64 ret;

        /*
         * Extract the sign before the multiplication and put it back
         * afterwards if needed.
         */
        ret = mul_u64_u64_shr(abs(a), b, shift);

        if (a < 0)
                ret = -((s64) ret);

        return ret;
}
#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
        union {
                u64 ll;
                struct {
#ifdef __BIG_ENDIAN
                        u32 high, low;
#else
                        u32 low, high;
#endif
                } l;
        } u, rl, rh;

        u.ll = a;
        rl.ll = mul_u32_u32(u.l.low, mul);
        rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);

        /* Bits 0-31 of the result will be in rl.l.low. */
        do_div(rl.ll, divisor);

        rl.l.high = rh.l.low;
        return rl.ll;
}
#endif

u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

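/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up. The divisor is evaluated only once.
 *
 * Return: dividend / divisor rounded up
 */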
#define DIV64_U64_ROUND_UP(ll, d) \
        ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })

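/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */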
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
        ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })

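/**
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */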
#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
        ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })

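/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */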
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \
{ \
        s64 __x = (dividend); \
        s32 __d = (divisor); \
        ((__x > 0) == (__d > 0)) ? \
                div_s64((__x + (__d / 2)), __d) : \
                div_s64((__x - (__d / 2)), __d); \
} \
)
#endif /* _LINUX_MATH64_H */