author | Sanjay Patel <spatel@rotateright.com> | 2019-04-03 14:41:28 +0000
committer | Sanjay Patel <spatel@rotateright.com> | 2019-04-03 14:41:28 +0000
commit | 393458f3ed3847accfad751f7775fd8b644c9bf0
tree | 2016e7b49b87c778c0f052cd9803ddc31eae2d42
parent | 04848090cd433ef2946fc40fe88684629596c84b
[x86] add negative tests for FP scalarization; NFC
These go with the proposal in D60150.
llvm-svn: 357592
-rw-r--r-- | llvm/test/CodeGen/X86/scalarize-fp.ll | 82
1 files changed, 82 insertions, 0 deletions
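
For contrast with the negative tests added below, here is a minimal sketch (not part of this commit) of the kind of pattern the D60150 proposal is presumably meant to handle, inferred from the negative-test comments and from the existing splat-splat tests in this file: both binop operands are splats of element 0 of FP vectors, so the fadd could be performed on the scalar lane and the splat sunk after the operation. The function name here is illustrative only.

; Hypothetical positive case (assumed from the negative tests): index-0 splats
; of FP vectors feeding an FP binop, which the proposed fold could scalarize.
define <2 x double> @fadd_splat_splat_index0_sketch(<2 x double> %vx, <2 x double> %vy) {
  %splatx = shufflevector <2 x double> %vx, <2 x double> undef, <2 x i32> zeroinitializer
  %splaty = shufflevector <2 x double> %vy, <2 x double> undef, <2 x i32> zeroinitializer
  %r = fadd <2 x double> %splatx, %splaty
  ret <2 x double> %r
}

The tests in the diff deliberately break one precondition at a time: non-zero splat index, mismatched splat indexes, a non-splat shuffle, and an integer (non-FP) binop.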
diff --git a/llvm/test/CodeGen/X86/scalarize-fp.ll b/llvm/test/CodeGen/X86/scalarize-fp.ll
index c4068612379..468aa044326 100644
--- a/llvm/test/CodeGen/X86/scalarize-fp.ll
+++ b/llvm/test/CodeGen/X86/scalarize-fp.ll
@@ -466,6 +466,88 @@ define <8 x float> @fdiv_splat_splat_v8f32(<8 x float> %vx, <8 x float> %vy) {
   ret <8 x float> %r
 }
 
+; Negative test - splat of non-zero indexes (still sink the splat).
+
+define <2 x double> @fadd_splat_splat_nonzero_v2f64(<2 x double> %vx, <2 x double> %vy) {
+; SSE-LABEL: fadd_splat_splat_nonzero_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: fadd_splat_splat_nonzero_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
+; AVX-NEXT:    retq
+  %splatx = shufflevector <2 x double> %vx, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  %splaty = shufflevector <2 x double> %vy, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  %r = fadd <2 x double> %splatx, %splaty
+  ret <2 x double> %r
+}
+
+; Negative test - splat of non-zero index and mismatched indexes.
+
+define <2 x double> @fadd_splat_splat_mismatch_v2f64(<2 x double> %vx, <2 x double> %vy) {
+; SSE-LABEL: fadd_splat_splat_mismatch_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: fadd_splat_splat_mismatch_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,1]
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %splatx = shufflevector <2 x double> %vx, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+  %splaty = shufflevector <2 x double> %vy, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+  %r = fadd <2 x double> %splatx, %splaty
+  ret <2 x double> %r
+}
+
+; Negative test - non-splat.
+
+define <2 x double> @fadd_splat_nonsplat_v2f64(<2 x double> %vx, <2 x double> %vy) {
+; SSE-LABEL: fadd_splat_nonsplat_v2f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    addpd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: fadd_splat_nonsplat_v2f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %splatx = shufflevector <2 x double> %vx, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+  %splaty = shufflevector <2 x double> %vy, <2 x double> undef, <2 x i32> <i32 0, i32 1>
+  %r = fadd <2 x double> %splatx, %splaty
+  ret <2 x double> %r
+}
+
+; Negative test - non-FP.
+
+define <2 x i64> @add_splat_splat_v2i64(<2 x i64> %vx, <2 x i64> %vy) {
+; SSE-LABEL: add_splat_splat_v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddq %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: add_splat_splat_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX-NEXT:    retq
+  %splatx = shufflevector <2 x i64> %vx, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+  %splaty = shufflevector <2 x i64> %vy, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+  %r = add <2 x i64> %splatx, %splaty
+  ret <2 x i64> %r
+}
+
 define <2 x double> @fadd_splat_const_op1_v2f64(<2 x double> %vx) {
 ; SSE-LABEL: fadd_splat_const_op1_v2f64:
 ; SSE:       # %bb.0: