author     Ingo Molnar <mingo@elte.hu>        2008-01-30 13:30:11 +0100
committer  Ingo Molnar <mingo@elte.hu>        2008-01-30 13:30:11 +0100
commit     3d0d14f983b55a570b976976284df4c434af3223 (patch)
tree       864f11c0ce5ee1e15acdd196018b79d0d0e2685d /arch/x86/math-emu/poly.h
parent     a4ec1effce83796209a0258602b0cf50026d86f2 (diff)
x86: lindent arch/i386/math-emu
lindent these files:
                               errors   lines of code   errors/KLOC
 arch/x86/math-emu/ (before)     2236            9424         237.2
 arch/x86/math-emu/ (after)       128            8706          14.7
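These errors/KLOC figures can be regenerated with the kernel's own tooling; assuming the counts come from scripts/checkpatch.pl run in --file mode (the usual source of such tables at the time, though the commit does not say so explicitly), roughly:

  $ scripts/checkpatch.pl --file arch/x86/math-emu/*.[ch] | grep -c 'ERROR:'
  $ wc -l arch/x86/math-emu/*.[ch]

The first command counts checkpatch errors, the second counts lines of code; errors divided by (lines/1000) gives errors/KLOC.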
no other changes. No code changed:
    text    data     bss      dec    hex filename
 5589802  612739 3833856 10036397 9924ad vmlinux.before
 5589802  612739 3833856 10036397 9924ad vmlinux.after
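The table above is plain `size vmlinux` output, so the "no code changed" claim can be re-checked mechanically: build once on the unpatched tree and once on the patched tree, with the same .config and toolchain, and compare:

  $ make vmlinux && size vmlinux

Identical text/data/bss (and therefore dec/hex) values strongly indicate that no generated code changed; a stricter check would compare `objdump -d` output of the two binaries.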
the intent of this patch is to ease the automated tracking of kernel
code quality - it's just much easier for us to maintain it if every file
in arch/x86 is supposed to be clean.
NOTE: it is a known problem of lindent that it causes some style damage
of its own, but it's a safe tool (well, except for the gcc array range
initializers extension), so we did the bulk of the changes via lindent,
and did the manual fixups in a followup patch.
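For reference, the gcc extension in question is the designated range initializer; indent(1), which lindent wraps, is known to mangle the `...` syntax. A minimal example of the construct (illustrative only, not from this patch):

static const int grade[16] = {
	[0 ... 7]  = 1,		/* GNU extension: elements 0..7 set to 1 */
	[8 ... 15] = 2,		/* elements 8..15 set to 2 */
};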
the resulting math-emu code has been tested by Thomas Gleixner on a real
386 DX CPU as well, and it works fine.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/math-emu/poly.h')
-rw-r--r--  arch/x86/math-emu/poly.h | 79
1 file changed, 36 insertions(+), 43 deletions(-)
diff --git a/arch/x86/math-emu/poly.h b/arch/x86/math-emu/poly.h
index 4db798114923..f317de7d8864 100644
--- a/arch/x86/math-emu/poly.h
+++ b/arch/x86/math-emu/poly.h
@@ -21,9 +21,9 @@
    allows. 9-byte would probably be sufficient.
    */
 typedef struct {
-  unsigned long lsw;
-  unsigned long midw;
-  unsigned long msw;
+	unsigned long lsw;
+	unsigned long midw;
+	unsigned long msw;
 } Xsig;
 
 asmlinkage void mul64(unsigned long long const *a, unsigned long long const *b,
@@ -33,12 +33,12 @@ asmlinkage void polynomial_Xsig(Xsig *, const unsigned long long *x,
 
 asmlinkage void mul32_Xsig(Xsig *, const unsigned long mult);
 asmlinkage void mul64_Xsig(Xsig *, const unsigned long long *mult);
-asmlinkage void mul_Xsig_Xsig(Xsig *dest, const Xsig *mult);
+asmlinkage void mul_Xsig_Xsig(Xsig * dest, const Xsig * mult);
 
 asmlinkage void shr_Xsig(Xsig *, const int n);
 asmlinkage int round_Xsig(Xsig *);
 asmlinkage int norm_Xsig(Xsig *);
-asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
+asmlinkage void div_Xsig(Xsig * x1, const Xsig * x2, const Xsig * dest);
 
 /* Macro to extract the most significant 32 bits from a long long */
 #define LL_MSW(x) (((unsigned long *)&x)[1])
@@ -49,7 +49,6 @@ asmlinkage void div_Xsig(Xsig * x1, const Xsig * x2, const Xsig * dest);
 /* Macro to access the 8 ms bytes of an Xsig as a long long */
 #define XSIG_LL(x) (*(unsigned long long *)&x.midw)
 
-
 /*
    Need to run gcc with optimizations on to get these to
    actually be in-line.
@@ -63,59 +62,53 @@ asmlinkage void div_Xsig(Xsig * x1, const Xsig * x2, const Xsig * dest);
 static inline unsigned long mul_32_32(const unsigned long arg1,
 				      const unsigned long arg2)
 {
-  int retval;
-  asm volatile ("mull %2; movl %%edx,%%eax" \
-		:"=a" (retval) \
-		:"0" (arg1), "g" (arg2) \
-		:"dx");
-  return retval;
+	int retval;
+	asm volatile ("mull %2; movl %%edx,%%eax":"=a" (retval)
+		      :"0"(arg1), "g"(arg2)
+		      :"dx");
+	return retval;
 }
 
-
 /* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
-static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
+static inline void add_Xsig_Xsig(Xsig * dest, const Xsig * x2)
 {
-  asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
-		"movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
-		"movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
-		"movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n"
-		:"=g" (*dest):"g" (dest), "g" (x2)
-		:"ax","si","di");
+	asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
+		      "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
+		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
+		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n":"=g"
+		      (*dest):"g"(dest), "g"(x2)
+		      :"ax", "si", "di");
 }
 
-
 /* Add the 12 byte Xsig x2 to Xsig dest, adjust exp if overflow occurs. */
 /* Note: the constraints in the asm statement didn't always work properly
    with gcc 2.5.8. Changing from using edi to using ecx got around the
    problem, but keep fingers crossed! */
-static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
+static inline void add_two_Xsig(Xsig * dest, const Xsig * x2, long int *exp)
 {
-  asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
-		"movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
-		"movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
-		"movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
-		"jnc 0f;\n"
-		"rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
-		"movl %4,%%ecx; incl (%%ecx)\n"
-		"movl $1,%%eax; jmp 1f;\n"
-		"0: xorl %%eax,%%eax;\n"
-		"1:\n"
-		:"=g" (*exp), "=g" (*dest)
-		:"g" (dest), "g" (x2), "g" (exp)
-		:"cx","si","ax");
+	asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
+		      "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
+		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
+		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
+		      "jnc 0f;\n"
+		      "rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
+		      "movl %4,%%ecx; incl (%%ecx)\n"
+		      "movl $1,%%eax; jmp 1f;\n"
+		      "0: xorl %%eax,%%eax;\n" "1:\n":"=g" (*exp), "=g"(*dest)
+		      :"g"(dest), "g"(x2), "g"(exp)
+		      :"cx", "si", "ax");
 }
 
-
 /* Negate (subtract from 1.0) the 12 byte Xsig */
 /* This is faster in a loop on my 386 than using the "neg" instruction. */
-static inline void negate_Xsig(Xsig *x)
+static inline void negate_Xsig(Xsig * x)
 {
-  asm volatile("movl %1,%%esi;\n"
-	       "xorl %%ecx,%%ecx;\n"
-	       "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
-	       "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
-	       "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n"
-	       :"=g" (*x):"g" (x):"si","ax","cx");
+	asm volatile ("movl %1,%%esi;\n"
+		      "xorl %%ecx,%%ecx;\n"
+		      "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
+		      "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
+		      "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n":"=g"
+		      (*x):"g"(x):"si", "ax", "cx");
 }
 
 #endif /* _POLY_H */
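For readers who do not speak x86 assembly, here is a rough portable-C sketch of what the add_Xsig_Xsig() routine in the hunk above computes: a 96-bit add of *x2 into *dest, propagating the carry across the three 32-bit words, with no overflow check. The xsig_demo/add_xsig_demo names are invented for illustration and are not in the kernel:

#include <stdint.h>

typedef struct {
	uint32_t lsw;		/* least significant word */
	uint32_t midw;
	uint32_t msw;		/* most significant word */
} xsig_demo;			/* mirrors the kernel's Xsig layout */

static void add_xsig_demo(xsig_demo *dest, const xsig_demo *x2)
{
	uint64_t sum;
	uint32_t carry;

	sum = (uint64_t)dest->lsw + x2->lsw;		/* the asm's addl */
	dest->lsw = (uint32_t)sum;
	carry = (uint32_t)(sum >> 32);

	sum = (uint64_t)dest->midw + x2->midw + carry;	/* adcl */
	dest->midw = (uint32_t)sum;
	carry = (uint32_t)(sum >> 32);

	dest->msw += x2->msw + carry;	/* final adcl; carry-out discarded */
}

The hand-written asm dates from the gcc 2.5.x era that the file's own comments mention; a modern compiler would generate a comparable add/adc sequence from C like the above.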