/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H

#ifdef CONFIG_X86_32

#include <linux/types.h>
#include <linux/log2.h>

/*
 * do_div() is NOT a C function. It wants to return
 * two values (the quotient and the remainder), but
 * since that doesn't work very well in C, what it
 * does is:
 *
 * - modifies the 64-bit dividend _in_place_
 * - returns the 32-bit remainder
 *
 * This ends up being the most efficient "calling
 * convention" on x86.
 */
#define do_div(n, base)                                         \
({                                                              \
    unsigned long __upper, __low, __high, __mod, __base;        \
    __base = (base);                                            \
    if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
        __mod = n & (__base - 1);                               \
        n >>= ilog2(__base);                                    \
    } else {                                                    \
        asm("" : "=a" (__low), "=d" (__high) : "A" (n));        \
        __upper = __high;                                       \
        if (__high) {                                           \
            __upper = __high % (__base);                        \
            __high = __high / (__base);                         \
        }                                                       \
        asm("divl %2" : "=a" (__low), "=d" (__mod)              \
            : "rm" (__base), "0" (__low), "1" (__upper));       \
        asm("" : "=A" (n) : "a" (__low), "d" (__high));         \
    }                                                           \
    __mod;                                                      \
})
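
/*
 * Usage sketch (illustrative, not part of the original header):
 * do_div() overwrites the 64-bit dividend with the quotient and
 * returns the 32-bit remainder, so splitting a nanosecond count
 * into seconds plus leftover nanoseconds looks like:
 *
 *     u64 ns = 2500000001ULL;
 *     u32 rem = do_div(ns, 1000000000);
 *     // now ns == 2 (quotient), rem == 500000001 (remainder)
 *
 * The dividend must be a 64-bit lvalue. Note the fast path above:
 * for a compile-time-constant power-of-two base the macro reduces
 * to a mask and a shift, and no divide instruction is emitted.
 */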

static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
    union {
        u64 v64;
        u32 v32[2];
    } d = { dividend };
    u32 upper;

    upper = d.v32[1];
    d.v32[1] = 0;
    if (upper >= divisor) {
        d.v32[1] = upper / divisor;
        upper %= divisor;
    }
    asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
        "rm" (divisor), "0" (d.v32[0]), "1" (upper));
    return d.v64;
}
#define div_u64_rem div_u64_rem
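
/*
 * Why the pre-division above is needed (added commentary, not from
 * the original file): "divl" divides the 64-bit value in edx:eax by
 * a 32-bit operand and raises #DE if the quotient does not fit in
 * 32 bits. Reducing the upper word first leaves a high part that is
 * strictly smaller than the divisor, so the inline divl can never
 * overflow. A usage sketch:
 *
 *     u32 rem;
 *     u64 q = div_u64_rem(4886718345ULL, 10, &rem);
 *     // q == 488671834, rem == 5
 */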

static inline u64 mul_u32_u32(u32 a, u32 b)
{
    u32 high, low;

    asm ("mull %[b]" : "=a" (low), "=d" (high)
             : [a] "a" (a), [b] "rm" (b) );

    return low | ((u64)high) << 32;
}
#define mul_u32_u32 mul_u32_u32
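
/*
 * Usage sketch (illustrative, not part of the original header):
 * "mull" produces the full 64-bit product in edx:eax, so the result
 * never truncates the way a plain C u32 * u32 multiply would:
 *
 *     u64 p = mul_u32_u32(0xffffffffU, 0xffffffffU);
 *     // p == 0xfffffffe00000001; a u32 multiply would yield 1
 */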

#else
# include <asm-generic/div64.h>

/*
 * Will generate a #DE exception when the result doesn't fit in a u64;
 * this could be fixed with an __ex_table[] entry when it becomes an
 * issue.
 */
static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
{
    u64 q;

    asm ("mulq %2; divq %3" : "=a" (q)
                : "a" (a), "rm" (mul), "rm" (div)
                : "rdx");

    return q;
}
#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
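
/*
 * Usage sketch (illustrative; "ticks" and "freq" are hypothetical
 * caller variables, not names from this file): "mulq" leaves the
 * full 128-bit product in rdx:rax and "divq" consumes it directly,
 * so a * mul itself cannot overflow; only a quotient wider than
 * 64 bits triggers the #DE noted above.
 *
 *     u64 scaled = mul_u64_u64_div_u64(ticks, 1000000000ULL, freq);
 *     // scaled == ticks * 1e9 / freq, computed without losing the
 *     // intermediate 128-bit product
 */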

static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
{
    return mul_u64_u64_div_u64(a, mul, div);
}
#define mul_u64_u32_div mul_u64_u32_div

#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_DIV64_H */