| author | Alexey Bataev <a.bataev@hotmail.com> | 2016-12-15 12:26:18 +0000 |
|---|---|---|
| committer | Alexey Bataev <a.bataev@hotmail.com> | 2016-12-15 12:26:18 +0000 |
| commit | 2db6045b2960ef15828d731378a01863eecd03fe (patch) | |
| tree | 8deec9c7fa959a97cc966d1ff83517af487016f9 /llvm | |
| parent | 795b0671c5fd3c064f9502d388e5f40a196b9d56 (diff) | |
| download | bcm5719-llvm-2db6045b2960ef15828d731378a01863eecd03fe.tar.gz bcm5719-llvm-2db6045b2960ef15828d731378a01863eecd03fe.zip | |
Revert "[TESTS] Initial commit of tests, by Andrew Tischenko"
This reverts commit ee709f8988653a0334fbf100cdbbdd83a3933347.
llvm-svn: 289814
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/test/CodeGen/X86/recip-fastmath2.ll | 273 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-sqrt.ll | 77 |
2 files changed, 0 insertions, 350 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/recip-fastmath2.ll b/llvm/test/CodeGen/X86/recip-fastmath2.ll
deleted file mode 100644
index 831bb4da1f3..00000000000
--- a/llvm/test/CodeGen/X86/recip-fastmath2.ll
+++ /dev/null
@@ -1,273 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
-
-
-define float @f32_no_step_2(float %x) #3 {
-; SSE-LABEL: f32_no_step_2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpss %xmm0, %xmm0
-; SSE-NEXT: mulss {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: f32_no_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-  %div = fdiv fast float 1234.0, %x
-  ret float %div
-}
-
-define float @f32_one_step_2(float %x) #1 {
-; SSE-LABEL: f32_one_step_2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpss %xmm0, %xmm2
-; SSE-NEXT: mulss %xmm2, %xmm0
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT: subss %xmm0, %xmm1
-; SSE-NEXT: mulss %xmm2, %xmm1
-; SSE-NEXT: addss %xmm2, %xmm1
-; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: f32_one_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-  %div = fdiv fast float 3456.0, %x
-  ret float %div
-}
-
-define float @f32_two_step_2(float %x) #2 {
-; SSE-LABEL: f32_two_step_2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpss %xmm0, %xmm2
-; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: mulss %xmm2, %xmm3
-; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: subss %xmm3, %xmm4
-; SSE-NEXT: mulss %xmm2, %xmm4
-; SSE-NEXT: addss %xmm2, %xmm4
-; SSE-NEXT: mulss %xmm4, %xmm0
-; SSE-NEXT: subss %xmm0, %xmm1
-; SSE-NEXT: mulss %xmm4, %xmm1
-; SSE-NEXT: addss %xmm4, %xmm1
-; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: f32_two_step_2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT: vsubss %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubss %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-  %div = fdiv fast float 6789.0, %x
-  ret float %div
-}
-
-define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
-; SSE-LABEL: v4f32_one_step2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpps %xmm0, %xmm2
-; SSE-NEXT: mulps %xmm2, %xmm0
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; SSE-NEXT: subps %xmm0, %xmm1
-; SSE-NEXT: mulps %xmm2, %xmm1
-; SSE-NEXT: addps %xmm2, %xmm1
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v4f32_one_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
-  ret <4 x float> %div
-}
-
-define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
-; SSE-LABEL: v4f32_two_step2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpps %xmm0, %xmm2
-; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: mulps %xmm2, %xmm3
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: subps %xmm3, %xmm4
-; SSE-NEXT: mulps %xmm2, %xmm4
-; SSE-NEXT: addps %xmm2, %xmm4
-; SSE-NEXT: mulps %xmm4, %xmm0
-; SSE-NEXT: subps %xmm0, %xmm1
-; SSE-NEXT: mulps %xmm4, %xmm1
-; SSE-NEXT: addps %xmm4, %xmm1
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v4f32_two_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vmulps %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vaddps %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vsubps %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-  %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
-  ret <4 x float> %div
-}
-
-define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
-; SSE-LABEL: v8f32_one_step2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpps %xmm1, %xmm4
-; SSE-NEXT: mulps %xmm4, %xmm1
-; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; SSE-NEXT: movaps %xmm2, %xmm3
-; SSE-NEXT: subps %xmm1, %xmm3
-; SSE-NEXT: mulps %xmm4, %xmm3
-; SSE-NEXT: addps %xmm4, %xmm3
-; SSE-NEXT: rcpps %xmm0, %xmm1
-; SSE-NEXT: mulps %xmm1, %xmm0
-; SSE-NEXT: subps %xmm0, %xmm2
-; SSE-NEXT: mulps %xmm1, %xmm2
-; SSE-NEXT: addps %xmm1, %xmm2
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
-; SSE-NEXT: movaps %xmm2, %xmm0
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v8f32_one_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm0, %ymm2, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
-  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
-  ret <8 x float> %div
-}
-
-define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
-; SSE-LABEL: v8f32_two_step2:
-; SSE: # BB#0:
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: rcpps %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm1, %xmm4
-; SSE-NEXT: mulps %xmm3, %xmm4
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; SSE-NEXT: movaps %xmm0, %xmm5
-; SSE-NEXT: subps %xmm4, %xmm5
-; SSE-NEXT: mulps %xmm3, %xmm5
-; SSE-NEXT: addps %xmm3, %xmm5
-; SSE-NEXT: mulps %xmm5, %xmm1
-; SSE-NEXT: movaps %xmm0, %xmm3
-; SSE-NEXT: subps %xmm1, %xmm3
-; SSE-NEXT: mulps %xmm5, %xmm3
-; SSE-NEXT: addps %xmm5, %xmm3
-; SSE-NEXT: rcpps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm2, %xmm4
-; SSE-NEXT: mulps %xmm1, %xmm4
-; SSE-NEXT: movaps %xmm0, %xmm5
-; SSE-NEXT: subps %xmm4, %xmm5
-; SSE-NEXT: mulps %xmm1, %xmm5
-; SSE-NEXT: addps %xmm1, %xmm5
-; SSE-NEXT: mulps %xmm5, %xmm2
-; SSE-NEXT: subps %xmm2, %xmm0
-; SSE-NEXT: mulps %xmm5, %xmm0
-; SSE-NEXT: addps %xmm5, %xmm0
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
-; SSE-NEXT: movaps %xmm3, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v8f32_two_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm2
-; AVX-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vsubps %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vmulps %ymm2, %ymm1, %ymm2
-; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vsubps %ymm0, %ymm3, %ymm0
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
-  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
-  ret <8 x float> %div
-}
-
-define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
-; SSE-LABEL: v8f32_no_step:
-; SSE: # BB#0:
-; SSE-NEXT: rcpps %xmm0, %xmm0
-; SSE-NEXT: rcpps %xmm1, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v8f32_no_step:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm0
-; AVX-NEXT: retq
-  %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
-  ret <8 x float> %div
-}
-
-define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
-; SSE-LABEL: v8f32_no_step2:
-; SSE: # BB#0:
-; SSE-NEXT: rcpps %xmm1, %xmm1
-; SSE-NEXT: rcpps %xmm0, %xmm0
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
-; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: v8f32_no_step2:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpps %ymm0, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: retq
-  %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
-  ret <8 x float> %div
-}
-
-attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" }
-attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" }
-attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" }
-attributes #3 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:0,vec-divf:0" }
-
diff --git a/llvm/test/CodeGen/X86/vector-sqrt.ll b/llvm/test/CodeGen/X86/vector-sqrt.ll
deleted file mode 100644
index 71043379334..00000000000
--- a/llvm/test/CodeGen/X86/vector-sqrt.ll
+++ /dev/null
@@ -1,77 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK
-
-; ModuleID = 'vector-sqrt.c'
-source_filename = "vector-sqrt.c"
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Function Attrs: nounwind readonly uwtable
-define <2 x double> @sqrtd2(double* nocapture readonly %v) local_unnamed_addr #0 {
-; CHECK-LABEL: sqrtd2:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: vsqrtsd 8(%rdi), %xmm1, %xmm1
-; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT: retq
-entry:
-  %0 = load double, double* %v, align 8, !tbaa !1
-  %call = tail call double @sqrt(double %0) #2
-  %arrayidx1 = getelementptr inbounds double, double* %v, i64 1
-  %1 = load double, double* %arrayidx1, align 8, !tbaa !1
-  %call2 = tail call double @sqrt(double %1) #2
-  %vecinit.i = insertelement <2 x double> undef, double %call, i32 0
-  %vecinit1.i = insertelement <2 x double> %vecinit.i, double %call2, i32 1
-  ret <2 x double> %vecinit1.i
-}
-
-; Function Attrs: nounwind readnone
-declare double @sqrt(double) local_unnamed_addr #1
-
-; Function Attrs: nounwind readonly uwtable
-define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
-; CHECK-LABEL: sqrtf4:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
-; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
-; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
-; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
-; CHECK-NEXT: retq
-entry:
-  %0 = load float, float* %v, align 4, !tbaa !5
-  %call = tail call float @sqrtf(float %0) #2
-  %arrayidx1 = getelementptr inbounds float, float* %v, i64 1
-  %1 = load float, float* %arrayidx1, align 4, !tbaa !5
-  %call2 = tail call float @sqrtf(float %1) #2
-  %arrayidx3 = getelementptr inbounds float, float* %v, i64 2
-  %2 = load float, float* %arrayidx3, align 4, !tbaa !5
-  %call4 = tail call float @sqrtf(float %2) #2
-  %arrayidx5 = getelementptr inbounds float, float* %v, i64 3
-  %3 = load float, float* %arrayidx5, align 4, !tbaa !5
-  %call6 = tail call float @sqrtf(float %3) #2
-  %vecinit.i = insertelement <4 x float> undef, float %call, i32 0
-  %vecinit1.i = insertelement <4 x float> %vecinit.i, float %call2, i32 1
-  %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %call4, i32 2
-  %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %call6, i32 3
-  ret <4 x float> %vecinit3.i
-}
-
-; Function Attrs: nounwind readnone
-declare float @sqrtf(float) local_unnamed_addr #1
-
-attributes #0 = { nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="haswell" "target-features"="+aes,+avx,+avx2,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="haswell" "target-features"="+aes,+avx,+avx2,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
-
-!llvm.ident = !{!0}
-
-!0 = !{!"clang version 4.0.0 (trunk 289787)"}
-!1 = !{!2, !2, i64 0}
-!2 = !{!"double", !3, i64 0}
-!3 = !{!"omnipotent char", !4, i64 0}
-!4 = !{!"Simple C/C++ TBAA"}
-!5 = !{!6, !6, i64 0}
-!6 = !{!"float", !3, i64 0}
```
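For reference, the reverted recip-fastmath2.ll exercises the `"reciprocal-estimates"` function attribute (the deleted `attributes #0`–`#3` above): it tells the X86 backend whether a `fast` `fdiv` may be lowered to an `rcpss`/`rcpps` estimate and how many Newton-Raphson refinement steps to apply, which is what the SSE/AVX CHECK lines verify for the `*_no_step*`, `*_one_step*`, and `*_two_step*` functions. Below is a minimal sketch of such a test, assuming the same `llc` + `utils/update_llc_test_checks.py` workflow as the deleted files; the function name and the specific attribute choice here are illustrative and not part of the reverted commit.

```llvm
; Hypothetical reduced example modeled on the deleted recip-fastmath2.ll.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
; (CHECK lines would normally be regenerated with utils/update_llc_test_checks.py.)

; With "divf:2,vec-divf:2", the backend is asked to replace the fast fdiv
; below with a reciprocal estimate plus two Newton-Raphson refinement steps
; rather than an actual divide instruction.
define float @recip_two_step_sketch(float %x) #0 {
  %div = fdiv fast float 1.0, %x
  ret float %div
}

; "!divf,!vec-divf" (attribute #0 in the deleted file) disables the estimate;
; "divf:0,vec-divf:0" (attribute #3) requests the raw estimate with no
; refinement steps, matching the *_no_step* functions above.
attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" }
```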

