author     Paul Mundt <lethal@linux-sh.org>    2008-11-18 14:22:39 +0900
committer  Paul Mundt <lethal@linux-sh.org>    2008-12-22 18:42:53 +0900
commit     e9bf51e5ccc7703226c79888603e157066213700
tree       3fbd5107cbbbc47ec01ebb3c81470e836f082e42
parent     00e825c6b99b39f12751ea45d38bb4d900de70f4
sh: __udivdi3 -> do_div() in softfloat lib.
Inhibit the generation of __udivdi3 for the softfloat lib by using do_div()
outright.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
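Background on the pattern used throughout the patch: on 32-bit SH there is no hardware 64-bit divide, so a plain C division of a 64-bit value makes gcc emit a call to the libgcc helper __udivdi3, which the kernel does not link against. The kernel's do_div() macro from <asm/div64.h> performs the same 64-bit-by-32-bit division in place and returns the remainder, so no helper call is generated. The sketch below is illustrative only; the wrapper function is hypothetical and not part of this patch, only the do_div() call mirrors what the diff does.

#include <linux/types.h>
#include <asm/div64.h>

/* Hypothetical helper, for illustration only: divide a 64-bit value
 * by a 32-bit divisor without pulling in __udivdi3. */
static u64 div_u64_by_u32(u64 dividend, u32 divisor)
{
        u64 quot = dividend;    /* do_div() overwrites its first argument */
        u32 rem;

        /* Before: quot = dividend / divisor;  -- emits __udivdi3 on 32-bit SH */
        rem = do_div(quot, divisor);    /* quot now holds the quotient, rem the remainder */

        (void)rem;              /* remainder is available if needed */
        return quot;
}

Because do_div() both consumes and overwrites its dividend, estimateDiv128To64() below copies a0 and rem0 into a temporary before dividing, and float32_div() widens zSig to a 64-bit type so it can be passed to do_div() directly.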
-rw-r--r--   arch/sh/kernel/cpu/sh4/softfloat.c | 73
1 file changed, 41 insertions, 32 deletions
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
index 2b747f3b02bd..42edf2e54e85 100644
--- a/arch/sh/kernel/cpu/sh4/softfloat.c
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -37,6 +37,7 @@
  */
 #include <linux/kernel.h>
 #include <cpu/fpu.h>
+#include <asm/div64.h>
 
 #define LIT64( a ) a##LL
 
@@ -67,16 +68,16 @@ typedef unsigned long long float64;
 extern void float_raise(unsigned int flags);    /* in fpu.c */
 extern int float_rounding_mode(void);   /* in fpu.c */
 
-inline bits64 extractFloat64Frac(float64 a);
-inline flag extractFloat64Sign(float64 a);
-inline int16 extractFloat64Exp(float64 a);
-inline int16 extractFloat32Exp(float32 a);
-inline flag extractFloat32Sign(float32 a);
-inline bits32 extractFloat32Frac(float32 a);
-inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
-inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
-inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
-inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
+bits64 extractFloat64Frac(float64 a);
+flag extractFloat64Sign(float64 a);
+int16 extractFloat64Exp(float64 a);
+int16 extractFloat32Exp(float32 a);
+flag extractFloat32Sign(float32 a);
+bits32 extractFloat32Frac(float32 a);
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
 float64 float64_sub(float64 a, float64 b);
 float32 float32_sub(float32 a, float32 b);
 float32 float32_add(float32 a, float32 b);
@@ -86,11 +87,11 @@ float32 float32_div(float32 a, float32 b);
 float32 float32_mul(float32 a, float32 b);
 float64 float64_mul(float64 a, float64 b);
 float32 float64_to_float32(float64 a);
-inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
             bits64 * z1Ptr);
-inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
             bits64 * z1Ptr);
-inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
 
 static int8 countLeadingZeros32(bits32 a);
 static int8 countLeadingZeros64(bits64 a);
@@ -110,42 +111,42 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
 static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
                                       bits32 * zSigPtr);
 
-inline bits64 extractFloat64Frac(float64 a)
+bits64 extractFloat64Frac(float64 a)
 {
         return a & LIT64(0x000FFFFFFFFFFFFF);
 }
 
-inline flag extractFloat64Sign(float64 a)
+flag extractFloat64Sign(float64 a)
 {
         return a >> 63;
 }
 
-inline int16 extractFloat64Exp(float64 a)
+int16 extractFloat64Exp(float64 a)
 {
         return (a >> 52) & 0x7FF;
 }
 
-inline int16 extractFloat32Exp(float32 a)
+int16 extractFloat32Exp(float32 a)
 {
         return (a >> 23) & 0xFF;
 }
 
-inline flag extractFloat32Sign(float32 a)
+flag extractFloat32Sign(float32 a)
 {
         return a >> 31;
 }
 
-inline bits32 extractFloat32Frac(float32 a)
+bits32 extractFloat32Frac(float32 a)
 {
         return a & 0x007FFFFF;
 }
 
-inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
 {
         return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
 }
 
-inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
 {
         bits64 z;
 
@@ -338,12 +339,12 @@ static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
 
 }
 
-inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
 {
         return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
 }
 
-inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
 {
         bits32 z;
         if (count == 0) {
@@ -634,7 +635,7 @@ normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
         *zExpPtr = 1 - shiftCount;
 }
 
-inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
             bits64 * z1Ptr)
 {
         bits64 z1;
@@ -644,7 +645,7 @@ inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
         *z0Ptr = a0 + b0 + (z1 < a1);
 }
 
-inline void
+void
 sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
        bits64 * z1Ptr)
 {
@@ -656,11 +657,14 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
 {
         bits64 b0, b1;
         bits64 rem0, rem1, term0, term1;
-        bits64 z;
+        bits64 z, tmp;
         if (b <= a0)
                 return LIT64(0xFFFFFFFFFFFFFFFF);
         b0 = b >> 32;
-        z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
+        tmp = a0;
+        do_div(tmp, b0);
+
+        z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32;
         mul64To128(b, z, &term0, &term1);
         sub128(a0, a1, term0, term1, &rem0, &rem1);
         while (((sbits64) rem0) < 0) {
@@ -669,11 +673,13 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
                 add128(rem0, rem1, b0, b1, &rem0, &rem1);
         }
         rem0 = (rem0 << 32) | (rem1 >> 32);
-        z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
+        tmp = rem0;
+        do_div(tmp, b0);
+        z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : tmp;
         return z;
 }
 
-inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
 {
         bits32 aHigh, aLow, bHigh, bLow;
         bits64 z0, zMiddleA, zMiddleB, z1;
@@ -769,7 +775,8 @@ float32 float32_div(float32 a, float32 b)
 {
         flag aSign, bSign, zSign;
         int16 aExp, bExp, zExp;
-        bits32 aSig, bSig, zSig;
+        bits32 aSig, bSig;
+        uint64_t zSig;
 
         aSig = extractFloat32Frac(a);
         aExp = extractFloat32Exp(a);
@@ -804,11 +811,13 @@ float32 float32_div(float32 a, float32 b)
                 aSig >>= 1;
                 ++zExp;
         }
-        zSig = (((bits64) aSig) << 32) / bSig;
+        zSig = (((bits64) aSig) << 32);
+        do_div(zSig, bSig);
+
         if ((zSig & 0x3F) == 0) {
                 zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
         }
-        return roundAndPackFloat32(zSign, zExp, zSig);
+        return roundAndPackFloat32(zSign, zExp, (bits32)zSig);
 }