0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
/*
 * soft-fp word configuration: all soft-float arithmetic on this port is
 * carried out in 32-bit machine words.
 */
#define _FP_W_TYPE_SIZE 32
#define _FP_W_TYPE unsigned int
#define _FP_WS_TYPE signed int
#define _FP_I_TYPE int

/*
 * Half-word helpers for the long-division macro below.  __ll_B is
 * 2^(W_TYPE_SIZE/2); the other two extract the low/high half of a word.
 * NOTE(review): UWtype and W_TYPE_SIZE are defined elsewhere (longlong.h
 * style); they are assumed to match _FP_W_TYPE/_FP_W_TYPE_SIZE above.
 */
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
/*
 * Fraction multiplication: single precision fits in a one-word pair
 * (umul_ppmm below); double precision uses the two-word variant.
 */
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)

/*
 * Fraction division via udiv_qrnnd (see below); the "_norm" single
 * variant pre-normalizes, matching UDIV_NEEDS_NORMALIZATION == 1.
 */
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)

/*
 * Default generated NaN: quiet bit plus all lower fraction bits set,
 * positive sign.  The trailing -1 entries fill the remaining fraction
 * words of the multiword (D, Q) formats.
 */
#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
#define _FP_NANSIGN_S 0
#define _FP_NANSIGN_D 0
#define _FP_NANSIGN_Q 0

/* Propagate operand NaN payloads instead of always using the default NaN. */
#define _FP_KEEPNANFRACP 1
0099
#ifdef FP_EX_BOOKE_E500_SPE
/*
 * e500 (Book-E SPE): floating-point status lives in the SPEFSCR
 * register; these are the exception summary status bit positions the
 * soft-fp code raises.
 */
#define FP_EX_INEXACT (1 << 21)
#define FP_EX_INVALID (1 << 20)
#define FP_EX_DIVZERO (1 << 19)
#define FP_EX_UNDERFLOW (1 << 18)
#define FP_EX_OVERFLOW (1 << 17)
/* Results are stored even when an enabled exception is raised. */
#define FP_INHIBIT_RESULTS 0

#define __FPU_FPSCR (current->thread.spefscr)
/*
 * Five exception-enable bits extracted from SPEFSCR.
 * NOTE(review): the shift count of 2 is assumed to match the SPEFSCR
 * enable-bit field layout -- confirm against the e500 core manual.
 */
#define __FPU_ENABLED_EXC \
({ \
(__FPU_FPSCR >> 2) & 0x1f; \
})
#else

/*
 * Classic FPU: status lives in the FPSCR.  Bit positions are written in
 * IBM convention (bit 0 = MSB), hence the (1 << (31 - n)) form.  The
 * INVALID sub-reason codes map onto the EFLAG_VX* detail bits defined
 * at the bottom of this file.
 */
#define FP_EX_INVALID (1 << (31 - 2))
#define FP_EX_INVALID_SNAN EFLAG_VXSNAN
#define FP_EX_INVALID_ISI EFLAG_VXISI
#define FP_EX_INVALID_IDI EFLAG_VXIDI
#define FP_EX_INVALID_ZDZ EFLAG_VXZDZ
#define FP_EX_INVALID_IMZ EFLAG_VXIMZ
#define FP_EX_OVERFLOW (1 << (31 - 3))
#define FP_EX_UNDERFLOW (1 << (31 - 4))
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))

#define __FPU_FPSCR (current->thread.fp_state.fpscr)

/*
 * Five exception-enable bits pulled out of the FPSCR.
 * NOTE(review): the shift count of 3 is assumed to select the VE..XE
 * enable field -- verify against the FPSCR bit layout.
 */
#define __FPU_ENABLED_EXC \
({ \
(__FPU_FPSCR >> 3) & 0x1f; \
})

#endif
0139
0140
0141
0142
0143
/*
 * Choose which operand's NaN payload to propagate for a two-operand
 * operation: if Y is a quiet NaN while X is not quiet (i.e. X is the
 * signaling one), take X; in every other case take Y.  The result class
 * is forced to NaN regardless.
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
{ \
R##_s = X##_s; \
_FP_FRAC_COPY_##wc(R,X); \
} \
else \
{ \
R##_s = Y##_s; \
_FP_FRAC_COPY_##wc(R,Y); \
} \
R##_c = FP_CLS_NAN; \
} while (0)
0159
0160
#include <linux/kernel.h>
#include <linux/sched.h>

/* True if any exception in 'bits' has its trap enabled in the FPSCR. */
#define __FPU_TRAP_P(bits) \
((__FPU_ENABLED_EXC & (bits)) != 0)

/*
 * Pack a canonical single-precision value back to raw format at 'val'.
 * The store is skipped when an exception was raised AND its trap is
 * enabled; the raised-exception mask is the macro's value either way.
 */
#define __FP_PACK_S(val,X) \
({ int __exc = _FP_PACK_CANONICAL(S,1,X); \
if(!__exc || !__FPU_TRAP_P(__exc)) \
_FP_PACK_RAW_1_P(S,val,X); \
__exc; \
})

/*
 * Double-precision analogue of __FP_PACK_S; exceptions are read from
 * FP_CUR_EXCEPTIONS instead of a returned mask.
 */
#define __FP_PACK_D(val,X) \
do { \
_FP_PACK_CANONICAL(D, 2, X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
_FP_PACK_RAW_2_P(D, val, X); \
} while (0)

/*
 * Pack a double result that must first be rounded to single precision:
 * convert D->S and round; only if that did not trap, convert back S->D,
 * round again, and store the double-format result.
 */
#define __FP_PACK_DS(val,X) \
do { \
FP_DECL_S(__X); \
FP_CONV(S, D, 1, 2, __X, X); \
_FP_PACK_CANONICAL(S, 1, __X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) { \
_FP_UNPACK_CANONICAL(S, 1, __X); \
FP_CONV(D, S, 2, 1, X, __X); \
_FP_PACK_CANONICAL(D, 2, X); \
if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) \
_FP_PACK_RAW_2_P(D, val, X); \
} \
} while (0)

/* Rounding mode: the low two bits of the status/control register. */
#define FP_ROUNDMODE \
({ \
__FPU_FPSCR & 0x3; \
})
0200
0201
0202
0203
0204
0205 #include <linux/types.h>
0206 #include <asm/byteorder.h>
0207
0208
0209
0210
0211
0212
0213
0214
0215
/*
 * 64-bit add on 32-bit registers: (sh,sl) = (ah,al) + (bh,bl), using the
 * carry chain (addc/adde).  When the high word of the addend is a
 * compile-time constant 0 or ~0 the adde is strength-reduced to
 * addze/addme, saving a register.  The %I modifiers let a small-constant
 * low word use the immediate form (addic).
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (bh) && (bh) == 0) \
__asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
__asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else \
__asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
: "=r" (sh), "=&r" (sl) \
: "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
} while (0)
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
/*
 * 64-bit subtract on 32-bit registers: (sh,sl) = (ah,al) - (bh,bl),
 * using the carry chain (subfc/subfe).  High words that are known
 * compile-time constants 0 or ~0 (on either operand) are special-cased
 * to subfze/subfme/addme/addze, which need one less source register.
 */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (ah) && (ah) == 0) \
__asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
__asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == 0) \
__asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
__asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
: "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else \
__asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
: "=r" (sh), "=&r" (sl) \
: "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
} while (0)
0258
0259
0260
0261
0262
0263
0264
/*
 * 32x32 -> 64-bit unsigned multiply: mulhwu produces the high word into
 * ph; a plain C multiply produces the (truncated) low word into pl.
 *
 * The operands are captured exactly once in __m0/__m1 and those locals
 * are used everywhere, including the asm.  The previous version passed
 * the raw (m0)/(m1) expressions to the asm as well, so an argument with
 * side effects (e.g. *p++) was evaluated twice; glibc's longlong.h uses
 * the locals in the asm for exactly this reason.
 */
#define umul_ppmm(ph, pl, m0, m1) \
do { \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
(pl) = __m0 * __m1; \
} while (0)
0271
0272
0273
0274
0275
0276
0277
0278
0279
/*
 * Divide the two-word dividend (n1,n0) by the one-word divisor d, giving
 * a one-word quotient q and remainder r.  This is the classic portable
 * half-word scheme (__udiv_qrnnd_c from GCC's longlong.h): two rounds of
 * half-word division with a correction step each (the q--/+= d branches
 * fix a quotient-digit overestimate of at most 2).
 *
 * Preconditions (standard udiv_qrnnd contract -- confirm at call sites):
 * n1 < d, and d is normalized (most significant bit set); hence
 * UDIV_NEEDS_NORMALIZATION below.
 */
#define udiv_qrnnd(q, r, n1, n0, d) \
do { \
UWtype __d1, __d0, __q1, __q0; \
UWtype __r1, __r0, __m; \
__d1 = __ll_highpart (d); \
__d0 = __ll_lowpart (d); \
\
__r1 = (n1) % __d1; \
__q1 = (n1) / __d1; \
__m = (UWtype) __q1 * __d0; \
__r1 = __r1 * __ll_B | __ll_highpart (n0); \
if (__r1 < __m) \
{ \
__q1--, __r1 += (d); \
if (__r1 >= (d)) /* no carry when adding d: may need second fixup */ \
if (__r1 < __m) \
__q1--, __r1 += (d); \
} \
__r1 -= __m; \
\
__r0 = __r1 % __d1; \
__q0 = __r1 / __d1; \
__m = (UWtype) __q0 * __d0; \
__r0 = __r0 * __ll_B | __ll_lowpart (n0); \
if (__r0 < __m) \
{ \
__q0--, __r0 += (d); \
if (__r0 >= (d)) /* same fixup for the low quotient digit */ \
if (__r0 < __m) \
__q0--, __r0 += (d); \
} \
__r0 -= __m; \
\
(q) = (UWtype) __q1 * __ll_B | __q0; \
(r) = __r0; \
} while (0)
0316
/* udiv_qrnnd() above needs a normalized divisor (high bit set). */
#define UDIV_NEEDS_NORMALIZATION 1

/*
 * soft-fp calls abort() on "cannot happen" paths; the kernel has no
 * abort, so bail out of the emulation routine with 0 instead.  This only
 * works because the macro is expanded inside functions returning int.
 */
#define abort() \
return 0

/* Map the compiler's endianness macro onto the name soft-fp tests. */
#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif

/*
 * FPSCR exception status bits, IBM bit numbering (bit 0 = MSB), written
 * as (1 << (31 - n)).  The first group mirrors the FP_EX_* summary bits
 * defined earlier in this file.
 */
#define EFLAG_INVALID (1 << (31 - 2))
#define EFLAG_OVERFLOW (1 << (31 - 3))
#define EFLAG_UNDERFLOW (1 << (31 - 4))
#define EFLAG_DIVZERO (1 << (31 - 5))
#define EFLAG_INEXACT (1 << (31 - 6))

/* Detail bits giving the specific cause of an invalid-operation fault. */
#define EFLAG_VXSNAN (1 << (31 - 7))
#define EFLAG_VXISI (1 << (31 - 8))
#define EFLAG_VXIDI (1 << (31 - 9))
#define EFLAG_VXZDZ (1 << (31 - 10))
#define EFLAG_VXIMZ (1 << (31 - 11))
#define EFLAG_VXVC (1 << (31 - 12))
#define EFLAG_VXSOFT (1 << (31 - 21))
#define EFLAG_VXSQRT (1 << (31 - 22))
#define EFLAG_VXCVI (1 << (31 - 23))