0001
0002 #ifndef _LINUX_MATH64_H
0003 #define _LINUX_MATH64_H
0004
0005 #include <linux/types.h>
0006
0007 #ifdef __x86_64__
/*
 * mul_u64_u64_div64 - compute (a * b) / c using a full 128-bit
 * intermediate product (x86-64 only).
 *
 * mulq widens the product of %rax (= a) and b into %rdx:%rax; divq then
 * divides that 128-bit value by c, leaving the 64-bit quotient in %rax.
 * The remainder left in %rdx is discarded (hence the "rdx" clobber).
 *
 * NOTE(review): divq raises #DE when c == 0 or when the quotient does
 * not fit in 64 bits — presumably callers guarantee a representable
 * result; confirm at call sites.
 */
static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
{
u64 q;

/* %rdx:%rax = a * b; then %rax = (%rdx:%rax) / c */
asm ("mulq %2; divq %3" : "=a" (q)
: "a" (a), "rm" (b), "rm" (c)
: "rdx");

return q;
}
0018 #define mul_u64_u64_div64 mul_u64_u64_div64
0019 #endif
0020
0021 #ifdef __SIZEOF_INT128__
0022 static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
0023 {
0024 return (u64)(((unsigned __int128)a * b) >> shift);
0025 }
0026
0027 #else
0028
0029 #ifdef __i386__
/*
 * mul_u32_u32 - multiply two 32-bit values into a full 64-bit product
 * (i386 only).
 *
 * mull produces the 64-bit product of %eax (= a) and b in %edx:%eax
 * (high:low); the two halves are then reassembled into a u64.
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
u32 high, low;

/* %edx:%eax = a * b; low half in %eax, high half in %edx */
asm ("mull %[b]" : "=a" (low), "=d" (high)
: [a] "a" (a), [b] "rm" (b) );

return low | ((u64)high) << 32;
}
0039 #else
0040 static inline u64 mul_u32_u32(u32 a, u32 b)
0041 {
0042 return (u64)a * b;
0043 }
0044 #endif
0045
0046 static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
0047 {
0048 u32 ah, al;
0049 u64 ret;
0050
0051 al = a;
0052 ah = a >> 32;
0053
0054 ret = mul_u32_u32(al, b) >> shift;
0055 if (ah)
0056 ret += mul_u32_u32(ah, b) << (32 - shift);
0057
0058 return ret;
0059 }
0060
0061 #endif
0062
0063 #ifndef mul_u64_u64_div64
0064 static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
0065 {
0066 u64 quot, rem;
0067
0068 quot = a / c;
0069 rem = a % c;
0070
0071 return quot * b + (rem * b) / c;
0072 }
0073 #endif
0074
0075 #endif