-rw-r--r-- | llvm/test/CodeGen/X86/known-signbits-vector.ll | 195
1 file changed, 128 insertions, 67 deletions
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 57d7f66a253..93439a0b1db 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64,X64-AVX2
 
 define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
 ; X86-LABEL: signbits_sext_v2i64_sitofp_v2f64:
@@ -159,18 +160,40 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
 }
 
 define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
-; CHECK-LABEL: signbits_sext_shuffle_sitofp:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; CHECK-NEXT: vcvtdq2pd %xmm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; X86-LABEL: signbits_sext_shuffle_sitofp:
+; X86: # %bb.0:
+; X86-NEXT: vpmovsxdq %xmm0, %xmm1
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT: vpmovsxdq %xmm0, %xmm0
+; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; X86-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X86-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X86-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X86-NEXT: retl
+;
+; X64-AVX1-LABEL: signbits_sext_shuffle_sitofp:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X64-AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; X64-AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: signbits_sext_shuffle_sitofp:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; X64-AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,2,1,0]
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
+; X64-AVX2-NEXT: retq
   %1 = sext <4 x i32> %a0 to <4 x i64>
   %2 = shufflevector <4 x i64> %1, <4 x i64>%a1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %3 = sitofp <4 x i64> %2 to <4 x double>
@@ -253,20 +276,33 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
 ; X86-NEXT: popl %eax
 ; X86-NEXT: retl
 ;
-; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
-; X64: # %bb.0:
-; X64-NEXT: vpsrlq $60, %xmm0, %xmm2
-; X64-NEXT: vpsrlq $61, %xmm0, %xmm0
-; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [4,8]
-; X64-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; X64-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; X64-NEXT: vpmovsxdq %xmm1, %xmm1
-; X64-NEXT: vpand %xmm1, %xmm0, %xmm2
-; X64-NEXT: vpor %xmm1, %xmm2, %xmm1
-; X64-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64-NEXT: retq
+; X64-AVX1-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrlq $60, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpsrlq $61, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4,8]
+; X64-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; X64-AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
+; X64-AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [4,8]
+; X64-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovsxdq %xmm1, %xmm1
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
+; X64-AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX2-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %3 = sext <2 x i32> %2 to <2 x i64>
@@ -312,33 +348,49 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
 ; X86-NEXT: vzeroupper
 ; X86-NEXT: retl
 ;
-; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
-; X64: # %bb.0:
-; X64-NEXT: vpsrad $31, %xmm2, %xmm4
-; X64-NEXT: vpsrad $1, %xmm2, %xmm5
-; X64-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; X64-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
-; X64-NEXT: vextractf128 $1, %ymm2, %xmm2
-; X64-NEXT: vpsrad $31, %xmm2, %xmm5
-; X64-NEXT: vpsrad $1, %xmm2, %xmm2
-; X64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X64-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
-; X64-NEXT: vpmovsxdq %xmm3, %xmm5
-; X64-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; X64-NEXT: vpmovsxdq %xmm3, %xmm3
-; X64-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm6
-; X64-NEXT: vblendvpd %xmm6, %xmm4, %xmm5, %xmm4
-; X64-NEXT: vextractf128 $1, %ymm1, %xmm1
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X64-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
-; X64-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
-; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X64-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; X64-AVX1-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrad $31, %xmm2, %xmm4
+; X64-AVX1-NEXT: vpsrad $1, %xmm2, %xmm5
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; X64-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; X64-AVX1-NEXT: vpsrad $31, %xmm2, %xmm5
+; X64-AVX1-NEXT: vpsrad $1, %xmm2, %xmm2
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
+; X64-AVX1-NEXT: vpmovsxdq %xmm3, %xmm5
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X64-AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
+; X64-AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm6
+; X64-AVX1-NEXT: vblendvpd %xmm6, %xmm4, %xmm5, %xmm4
+; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0
+; X64-AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: vzeroupper
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpsrad $31, %ymm2, %ymm4
+; X64-AVX2-NEXT: vpsrad $1, %ymm2, %ymm2
+; X64-AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[1,1,3,3,5,5,7,7]
+; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7]
+; X64-AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
+; X64-AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vblendvpd %ymm0, %ymm2, %ymm3, %ymm0
+; X64-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
+; X64-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX2-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
   %1 = ashr <4 x i64> %a2, <i64 33, i64 63, i64 33, i64 63>
   %2 = sext <4 x i32> %a3 to <4 x i64>
   %3 = icmp eq <4 x i64> %a0, %a1
@@ -368,19 +420,28 @@ define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32
 ; X86-NEXT: vzeroupper
 ; X86-NEXT: retl
 ;
-; X64-LABEL: cross_bb_signbits_insert_subvec:
-; X64: # %bb.0:
-; X64-NEXT: vextractf128 $1, %ymm0, %xmm2
-; X64-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; X64-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
-; X64-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
-; X64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; X64-NEXT: vandnps %ymm1, %ymm0, %ymm1
-; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; X64-NEXT: vorps %ymm1, %ymm0, %ymm0
-; X64-NEXT: vmovaps %ymm0, (%rdi)
-; X64-NEXT: vzeroupper
-; X64-NEXT: retq
+; X64-AVX1-LABEL: cross_bb_signbits_insert_subvec:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X64-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X64-AVX1-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; X64-AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
+; X64-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm1
+; X64-AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; X64-AVX1-NEXT: vmovaps %ymm0, (%rdi)
+; X64-AVX1-NEXT: vzeroupper
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: cross_bb_signbits_insert_subvec:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpblendvb %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; X64-AVX2-NEXT: vmovdqa %ymm0, (%rdi)
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
   %a = icmp eq <32 x i8> %x, zeroinitializer
   %b = icmp eq <32 x i8> %x, zeroinitializer
   %c = and <32 x i1> %a, %b
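The CHECK blocks in this test are autogenerated (see the NOTE line), so after adding the second x86-64 RUN line the assertions are normally refreshed with utils/update_llc_test_checks.py rather than edited by hand. A minimal sketch of that workflow, assuming an in-tree build whose binaries live under build/bin (the build path is an assumption, not part of this change):

    # Regenerate the autogenerated CHECK lines for every RUN line in the test
    # (build/bin/llc is a hypothetical build location).
    python3 llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/X86/known-signbits-vector.ll

    # Run just this test through lit to confirm the AVX and AVX2 RUN lines pass.
    build/bin/llvm-lit -v llvm/test/CodeGen/X86/known-signbits-vector.ll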