Diffstat (limited to 'llvm/test/CodeGen/ARM/vfp.ll')
| -rw-r--r-- | llvm/test/CodeGen/ARM/vfp.ll | 208 |
1 file changed, 101 insertions, 107 deletions
diff --git a/llvm/test/CodeGen/ARM/vfp.ll b/llvm/test/CodeGen/ARM/vfp.ll
index 11f668efa56..2acb33f9aeb 100644
--- a/llvm/test/CodeGen/ARM/vfp.ll
+++ b/llvm/test/CodeGen/ARM/vfp.ll
@@ -1,150 +1,144 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fabs | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fmscs | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fcvt | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fuito | count 2
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fto.i | count 4
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep bmi | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep bgt | count 1
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm -mattr=+vfp2 | \
+; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2 | \
 ; RUN: grep fcmpezs | count 1

-void %test(float *%P, double* %D) {
- %A = load float* %P
- %B = load double* %D
- store float %A, float* %P
- store double %B, double* %D
- ret void
+define void @test(float* %P, double* %D) {
+ %A = load float* %P ; <float> [#uses=1]
+ %B = load double* %D ; <double> [#uses=1]
+ store float %A, float* %P
+ store double %B, double* %D
+ ret void
 }

-declare float %fabsf(float)
-declare double %fabs(double)
+declare float @fabsf(float)

-void %test_abs(float *%P, double* %D) {
- %a = load float* %P
- %b = call float %fabsf(float %a)
- store float %b, float* %P
+declare double @fabs(double)

- %A = load double* %D
- %B = call double %fabs(double %A)
- store double %B, double* %D
- ret void
+define void @test_abs(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=1]
+ %b = call float @fabsf( float %a ) ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=1]
+ %B = call double @fabs( double %A ) ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
 }

-void %test_add(float *%P, double* %D) {
- %a = load float* %P
- %b = add float %a, %a
- store float %b, float* %P
-
- %A = load double* %D
- %B = add double %A, %A
- store double %B, double* %D
- ret void
+define void @test_add(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=2]
+ %b = add float %a, %a ; <float> [#uses=1]
+ store float %b, float* %P
+ %A = load double* %D ; <double> [#uses=2]
+ %B = add double %A, %A ; <double> [#uses=1]
+ store double %B, double* %D
+ ret void
 }

-void %test_ext_round(float *%P, double* %D) {
- %a = load float* %P
- %b = cast float %a to double
-
- %A = load double* %D
- %B = cast double %A to float
-
- store double %b, double* %D
- store float %B, float* %P
- ret void
+define void @test_ext_round(float* %P, double* %D) {
+ %a = load float* %P ; <float> [#uses=1]
+ %b = fpext float %a to double ; <double> [#uses=1]
+ %A = load double* %D ; <double> [#uses=1]
+ %B = fptrunc double %A to float ; <float> [#uses=1]
+ store double %b, double* %D
+ store float %B, float* %P
+ ret void
 }

-void %test_fma(float *%P1, float* %P2, float *%P3) {
- %a1 = load float* %P1
- %a2 = load float* %P2
- %a3 = load float* %P3
-
- %X = mul float %a1, %a2
- %Y = sub float %X, %a3
-
- store float %Y, float* %P1
- ret void
+define void @test_fma(float* %P1, float* %P2, float* %P3) {
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %a2 = load float* %P2 ; <float> [#uses=1]
+ %a3 = load float* %P3 ; <float> [#uses=1]
+ %X = mul float %a1, %a2 ; <float> [#uses=1]
+ %Y = sub float %X, %a3 ; <float> [#uses=1]
+ store float %Y, float* %P1
+ ret void
 }

-int %test_ftoi(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to int
- ret int %b1
+define i32 @test_ftoi(float* %P1) {
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %b1 = fptosi float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }

-uint %test_ftou(float *%P1) {
- %a1 = load float* %P1
- %b1 = cast float %a1 to uint
- ret uint %b1
+define i32 @test_ftou(float* %P1) {
+ %a1 = load float* %P1 ; <float> [#uses=1]
+ %b1 = fptoui float %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }

-int %test_dtoi(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to int
- ret int %b1
+define i32 @test_dtoi(double* %P1) {
+ %a1 = load double* %P1 ; <double> [#uses=1]
+ %b1 = fptosi double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }

-uint %test_dtou(double *%P1) {
- %a1 = load double* %P1
- %b1 = cast double %a1 to uint
- ret uint %b1
+define i32 @test_dtou(double* %P1) {
+ %a1 = load double* %P1 ; <double> [#uses=1]
+ %b1 = fptoui double %a1 to i32 ; <i32> [#uses=1]
+ ret i32 %b1
 }

-void %test_utod(double *%P1, uint %X) {
- %b1 = cast uint %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod(double* %P1, i32 %X) {
+ %b1 = uitofp i32 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
 }

-void %test_utod2(double *%P1, ubyte %X) {
- %b1 = cast ubyte %X to double
- store double %b1, double* %P1
- ret void
+define void @test_utod2(double* %P1, i8 %X) {
+ %b1 = uitofp i8 %X to double ; <double> [#uses=1]
+ store double %b1, double* %P1
+ ret void
 }

-void %test_cmp(float* %glob, int %X) {
+define void @test_cmp(float* %glob, i32 %X) {
 entry:
- %tmp = load float* %glob ; <float> [#uses=2]
- %tmp3 = getelementptr float* %glob, int 2 ; <float*> [#uses=1]
- %tmp4 = load float* %tmp3 ; <float> [#uses=2]
- %tmp = seteq float %tmp, %tmp4 ; <bool> [#uses=1]
- %tmp5 = tail call bool %llvm.isunordered.f32( float %tmp, float %tmp4 ) ; <bool> [#uses=1]
- %tmp6 = or bool %tmp, %tmp5 ; <bool> [#uses=1]
- br bool %tmp6, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp7 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+ %tmp = load float* %glob ; <float> [#uses=2]
+ %tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
+ %tmp4 = load float* %tmp3 ; <float> [#uses=2]
+ %tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp5 = fcmp uno float %tmp, %tmp4 ; <i1> [#uses=1]
+ %tmp6 = or i1 %tmp.upgrd.1, %tmp5 ; <i1> [#uses=1]
+ br i1 %tmp6, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void
+
+cond_false: ; preds = %entry
+ %tmp7 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
 }

-declare bool %llvm.isunordered.f32(float, float)
+declare i1 @llvm.isunordered.f32(float, float)

-declare int %bar(...)
+declare i32 @bar(...)

-declare int %baz(...)
+declare i32 @baz(...)

-void %test_cmpfp0(float* %glob, int %X) {
+define void @test_cmpfp0(float* %glob, i32 %X) {
 entry:
- %tmp = load float* %glob ; <float> [#uses=1]
- %tmp = setgt float %tmp, 0.000000e+00 ; <bool> [#uses=1]
- br bool %tmp, label %cond_true, label %cond_false
+ %tmp = load float* %glob ; <float> [#uses=1]
+ %tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
+ br i1 %tmp.upgrd.3, label %cond_true, label %cond_false

-cond_true: ; preds = %entry
- %tmp = tail call int (...)* %bar( ) ; <int> [#uses=0]
- ret void
+cond_true: ; preds = %entry
+ %tmp.upgrd.4 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
+ ret void

-cond_false: ; preds = %entry
- %tmp1 = tail call int (...)* %baz( ) ; <int> [#uses=0]
- ret void
+cond_false: ; preds = %entry
+ %tmp1 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
+ ret void
 }
-

