/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64

#include <linux/types.h>
#include <asm/compiler.h>

static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
    /* Pin operands to the registers __do_div64 expects (non-standard ABI,
     * see the comment above): divisor in r4, dividend in r0/r1, and the
     * 64-bit quotient comes back in r2/r3. */
    register unsigned int __base      asm("r4") = base;
    register unsigned long long __n   asm("r0") = *n;
    register unsigned long long __res asm("r2");
    unsigned int __rem;
    /* __asmeq() (from asm/compiler.h) makes the build fail if the compiler
     * did not actually honor the register pinning above. ip, lr and the
     * condition flags are clobbered by the out-of-line routine. */
    asm(    __asmeq("%0", "r0")
        __asmeq("%1", "r2")
        __asmeq("%2", "r4")
        "bl __do_div64"
        : "+r" (__n), "=r" (__res)
        : "r" (__base)
        : "ip", "lr", "cc");
    /* __do_div64 leaves the 32-bit remainder in the upper half of the
     * dividend register pair; extract it with the shift. */
    __rem = __n >> 32;
    *n = __res;
    return __rem;
}
#define __div64_32 __div64_32
0042 
0043 #if !defined(CONFIG_AEABI)
0044 
0045 /*
0046  * In OABI configurations, some uses of the do_div function
0047  * cause gcc to run out of registers. To work around that,
0048  * we can force the use of the out-of-line version for
0049  * configurations that build a OABI kernel.
0050  */
0051 #define do_div(n, base) __div64_32(&(n), base)
0052 
0053 #else
0054 
/*
 * ARM override of the __arch_xprod_64() helper used by the constant-divisor
 * fast path in asm-generic/div64.h (included below). Per that header's
 * contract, it returns the upper 64 bits of the 128-bit product, i.e.
 * ((bias ? m : 0) + m * n) >> 64, computed with UMULL/UMLAL sequences.
 *
 * NOTE(review): the %Q (low half) and %R (high half) operand modifiers are
 * deliberately used in swapped roles across the asm blocks below to avoid
 * extra register moves — the scheduling is intentional; do not "fix" it.
 */
static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
    unsigned long long res;
    /* Scratch carry register, pinned to ip and pre-zeroed; only the
     * carry-propagating branches below actually use it. */
    register unsigned int tmp asm("ip") = 0;

    if (!bias) {
        /* Seed the accumulator from m.lo * n.lo; only its high word can
         * contribute to the final >> 64 result, so the low half is zeroed. */
        asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
            "mov    %Q0, #0"
            : "=&r" (res)
            : "r" (m), "r" (n)
            : "cc");
    } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
        /* Bias case with bits 31 and 63 of m clear: folding the bias (m)
         * into m.lo * n.lo cannot carry out of the top half, so a single
         * umlal suffices — no explicit carry handling. */
        res = m;
        asm (   "umlal  %Q0, %R0, %Q1, %Q2\n\t"
            "mov    %Q0, #0"
            : "+&r" (res)
            : "r" (m), "r" (n)
            : "cc");
    } else {
        /* General bias case: the addition may carry, so propagate it
         * explicitly through tmp (the cmn/adcs/adc sequence). */
        asm (   "umull  %Q0, %R0, %Q2, %Q3\n\t"
            "cmn    %Q0, %Q2\n\t"
            "adcs   %R0, %R0, %R2\n\t"
            "adc    %Q0, %1, #0"
            : "=&r" (res), "+&r" (tmp)
            : "r" (m), "r" (n)
            : "cc");
    }

    if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
        /* Same precondition split as above: with bits 31/63 of m clear the
         * cross products m.hi*n.lo and m.lo*n.hi accumulate without
         * overflowing, so no carry register is needed. */
        asm (   "umlal  %R0, %Q0, %R1, %Q2\n\t"
            "umlal  %R0, %Q0, %Q1, %R2\n\t"
            "mov    %R0, #0\n\t"
            "umlal  %Q0, %R0, %R1, %R2"
            : "+&r" (res)
            : "r" (m), "r" (n)
            : "cc");
    } else {
        /* General case: carries from the cross-product accumulation are
         * collected in tmp and added back before the final m.hi * n.hi. */
        asm (   "umlal  %R0, %Q0, %R2, %Q3\n\t"
            "umlal  %R0, %1, %Q2, %R3\n\t"
            "mov    %R0, #0\n\t"
            "adds   %Q0, %1, %Q0\n\t"
            "adc    %R0, %R0, #0\n\t"
            "umlal  %Q0, %R0, %R2, %R3"
            : "+&r" (res), "+&r" (tmp)
            : "r" (m), "r" (n)
            : "cc");
    }

    return res;
}
#define __arch_xprod_64 __arch_xprod_64

#include <asm-generic/div64.h>

#endif

#endif