diff options
author | Sanjay Patel <spatel@rotateright.com> | 2019-04-03 20:34:22 +0000 |
---|---|---|
committer | Sanjay Patel <spatel@rotateright.com> | 2019-04-03 20:34:22 +0000 |
commit | 8c9ceecdc67732465e9f15b14105f7482ab7530e (patch) | |
tree | 17bce13b99a3651fd8f8600ac6d7ff86395cb922 | |
parent | 091baa73814cc8eee7c0b8475b8dcb308c71265b (diff) | |
download | bcm5719-llvm-8c9ceecdc67732465e9f15b14105f7482ab7530e.tar.gz bcm5719-llvm-8c9ceecdc67732465e9f15b14105f7482ab7530e.zip |
[x86] add test for disguised horizontal op; NFC
llvm-svn: 357630
-rw-r--r-- | llvm/test/CodeGen/X86/haddsub-shuf.ll | 48 |
1 files changed, 48 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/haddsub-shuf.ll b/llvm/test/CodeGen/X86/haddsub-shuf.ll
index 0ece3fe1414..8ad11e78915 100644
--- a/llvm/test/CodeGen/X86/haddsub-shuf.ll
+++ b/llvm/test/CodeGen/X86/haddsub-shuf.ll
@@ -313,6 +313,54 @@ define <2 x double> @hadd_v2f64(<2 x double> %a) {
   ret <2 x double> %shuf
 }
 
+define <2 x double> @hadd_v2f64_scalar_splat(<2 x double> %a) {
+; SSSE3_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; SSSE3_SLOW:       # %bb.0:
+; SSSE3_SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSSE3_SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSSE3_SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSSE3_SLOW-NEXT:    movddup {{.*#+}} xmm0 = xmm1[0,0]
+; SSSE3_SLOW-NEXT:    retq
+;
+; SSSE3_FAST-LABEL: hadd_v2f64_scalar_splat:
+; SSSE3_FAST:       # %bb.0:
+; SSSE3_FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSSE3_FAST-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
+; SSSE3_FAST-NEXT:    retq
+;
+; AVX1_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; AVX1_SLOW:       # %bb.0:
+; AVX1_SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1_SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX1_SLOW-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1_SLOW-NEXT:    retq
+;
+; AVX1_FAST-LABEL: hadd_v2f64_scalar_splat:
+; AVX1_FAST:       # %bb.0:
+; AVX1_FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX1_FAST-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1_FAST-NEXT:    retq
+;
+; AVX2_SLOW-LABEL: hadd_v2f64_scalar_splat:
+; AVX2_SLOW:       # %bb.0:
+; AVX2_SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2_SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX2_SLOW-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2_SLOW-NEXT:    retq
+;
+; AVX2_FAST-LABEL: hadd_v2f64_scalar_splat:
+; AVX2_FAST:       # %bb.0:
+; AVX2_FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX2_FAST-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2_FAST-NEXT:    retq
+  %a0 = extractelement <2 x double> %a, i32 0
+  %a1 = extractelement <2 x double> %a, i32 1
+  %hop = fadd double %a0, %a1
+  %ins = insertelement <2 x double> undef, double %hop, i32 0
+  %shuf = shufflevector <2 x double> %ins, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+  ret <2 x double> %shuf
+}
+
 define <4 x double> @hadd_v4f64(<4 x double> %a) {
 ; SSSE3_SLOW-LABEL: hadd_v4f64:
 ; SSSE3_SLOW:       # %bb.0: