-rw-r--r--   llvm/test/CodeGen/X86/combine-shl.ll        | 34
-rw-r--r--   llvm/test/CodeGen/X86/known-bits-vector.ll  | 28
2 files changed, 62 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll
index 0052f51c542..315f221b4e9 100644
--- a/llvm/test/CodeGen/X86/combine-shl.ll
+++ b/llvm/test/CodeGen/X86/combine-shl.ll
@@ -864,3 +864,37 @@ define <4 x i32> @combine_vec_add_shl_nonsplat(<4 x i32> %a0) {
   %2 = add <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
   ret <4 x i32> %2
 }
+
+define <4 x i32> @combine_vec_add_shuffle_shl(<4 x i32> %a0) {
+; SSE2-LABEL: combine_vec_add_shuffle_shl:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pslld $3, %xmm1
+; SSE2-NEXT:    pslld $2, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,3,3,0]
+; SSE2-NEXT:    por {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: combine_vec_add_shuffle_shl:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    pslld $3, %xmm1
+; SSE41-NEXT:    pslld $2, %xmm0
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_add_shuffle_shl:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3,3,3,3]
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = shl <4 x i32> %a0, <i32 2, i32 3, i32 0, i32 1>
+  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 1, i32 0>
+  %3 = add <4 x i32> %2, <i32 3, i32 3, i32 3, i32 3>
+  ret <4 x i32> %3
+}
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index a918ab34042..d1d19948574 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -664,3 +664,31 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
   %6 = uitofp <4 x i32> %5 to <4 x float>
   ret <4 x float> %6
 }
+
+define <2 x double> @knownbits_lshr_subvector_uitofp(<4 x i32> %x) {
+; X32-LABEL: knownbits_lshr_subvector_uitofp:
+; X32:       # %bb.0:
+; X32-NEXT:    vpsrld $2, %xmm0, %xmm1
+; X32-NEXT:    vpsrld $1, %xmm0, %xmm0
+; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X32-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
+; X32-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; X32-NEXT:    vsubpd %xmm1, %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_lshr_subvector_uitofp:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrld $2, %xmm0, %xmm1
+; X64-NEXT:    vpsrld $1, %xmm0, %xmm0
+; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X64-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
+; X64-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vsubpd %xmm1, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 0, i32 0>
+  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  %3 = uitofp <2 x i32> %2 to <2 x double>
+  ret <2 x double> %3
+}
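
Neither hunk touches the files' RUN lines, so they are not shown above. As a rough sketch only — the exact triples, -mattr strings, and check prefixes used by the real RUN lines are assumptions here — a function like the new combine-shl.ll test is typically exercised by piping llc output into FileCheck against the same file:

    llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < llvm/test/CodeGen/X86/combine-shl.ll \
      | FileCheck llvm/test/CodeGen/X86/combine-shl.ll --check-prefix=SSE2

The SSE2/SSE41/AVX and X32/X64 CHECK blocks are normally regenerated with llvm/utils/update_llc_test_checks.py rather than written by hand.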