| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-11-04 23:25:08 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-11-04 23:25:08 +0000 |
| commit | c9a0779309ebf8240dcea044fafbf072c1163095 (patch) | |
| tree | b6f9a9917a3ca17f506944e6b0c57afd695a7e97 /llvm/test/CodeGen/X86 | |
| parent | de4277a076a22e396bf80f0974844af558deb8aa (diff) | |
| download | bcm5719-llvm-c9a0779309ebf8240dcea044fafbf072c1163095.tar.gz / bcm5719-llvm-c9a0779309ebf8240dcea044fafbf072c1163095.zip | |
[X86][SSE] Enable commutation for SSE immediate blend instructions
Patch to allow the (v)blendps, (v)blendpd, (v)pblendw and vpblendd instructions to be commuted by swapping their source registers and inverting the blend mask.
This is primarily to improve memory folding (see new tests): these blends can only fold a load for their second source operand, so commuting lets a load feeding the first operand be folded as well. It also improves the quality of shuffles (see modified tests).
Differential Revision: http://reviews.llvm.org/D6015
llvm-svn: 221313
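The rewrite itself is mechanical: each bit of the blend immediate selects which source supplies the corresponding element (0 = first source, 1 = second source), so the two sources can be swapped as long as the low NumElts bits of the immediate are inverted. Below is a minimal standalone C++ sketch of that rule; the helper `commuteBlendMask` is hypothetical and is not the actual X86 backend code:

```cpp
#include <cassert>
#include <cstdint>

// Commute "blend dst, src1, src2, imm" into "blend dst, src2, src1, imm'".
// Immediate bit i picks the source of element i (0 -> src1, 1 -> src2),
// so swapping the sources means flipping the low NumElts mask bits.
// Hypothetical helper, not the actual X86InstrInfo implementation.
uint8_t commuteBlendMask(uint8_t Imm, unsigned NumElts) {
  assert(NumElts >= 2 && NumElts <= 8 && "blend immediates cover 2-8 elements");
  return static_cast<uint8_t>(~Imm & ((1u << NumElts) - 1));
}
```

For example, the new pblendw tests below use an immediate of 17 (0b00010001) on 8 word elements; the commuted mask is ~17 & 0xFF = 238 (0b11101110), which selects the same data after the swap and leaves the load in the memory-foldable second-source slot.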
Diffstat (limited to 'llvm/test/CodeGen/X86')
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-or.ll               | 31 |
| -rw-r--r-- | llvm/test/CodeGen/X86/commute-blend-avx2.ll       | 89 |
| -rw-r--r-- | llvm/test/CodeGen/X86/commute-blend-sse41.ll      | 34 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-blend.ll             | 18 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll    | 10 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-combining.ll | 70 |
6 files changed, 170 insertions, 82 deletions
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index ba9d48d6047..9539eae9d6a 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -19,8 +19,7 @@ define <2 x i64> @test1(<2 x i64> %a, <2 x i64> %b) {
 define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test2:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT: retq
 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
@@ -32,8 +31,7 @@ define <4 x i32> @test2(<4 x i32> %a, <4 x i32> %b) {
 define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test3:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT: retq
 %shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 2, i32 1>
 %shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
@@ -45,8 +43,7 @@ define <2 x i64> @test3(<2 x i64> %a, <2 x i64> %b) {
 define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test4:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT: retq
 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 1, i32 2, i32 3>
@@ -106,8 +103,7 @@ define <2 x i64> @test8(<2 x i64> %a, <2 x i64> %b) {
 define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test9:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT: retq
 %and1 = and <4 x i32> %a, <i32 0, i32 0, i32 -1, i32 -1>
 %and2 = and <4 x i32> %b, <i32 -1, i32 -1, i32 0, i32 0>
@@ -119,8 +115,7 @@ define <4 x i32> @test9(<4 x i32> %a, <4 x i32> %b) {
 define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test10:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT: retq
 %and1 = and <2 x i64> %a, <i64 0, i64 -1>
 %and2 = and <2 x i64> %b, <i64 -1, i64 0>
@@ -132,8 +127,7 @@ define <2 x i64> @test10(<2 x i64> %a, <2 x i64> %b) {
 define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test11:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT: retq
 %and1 = and <4 x i32> %a, <i32 -1, i32 0, i32 0, i32 0>
 %and2 = and <4 x i32> %b, <i32 0, i32 -1, i32 -1, i32 -1>
@@ -230,12 +224,10 @@ define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test18:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: xorps %xmm2, %xmm2
-; CHECK-NEXT: xorps %xmm3, %xmm3
-; CHECK-NEXT: blendps {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,0,1,1]
-; CHECK-NEXT: blendps {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; CHECK-NEXT: orps %xmm0, %xmm2
-; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
+; CHECK-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; CHECK-NEXT: por %xmm1, %xmm0
 ; CHECK-NEXT: retq
 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> zeroinitializer, <4 x i32><i32 0, i32 4, i32 4, i32 4>
@@ -295,8 +287,7 @@ define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
 define <4 x i8> @test_crash(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-LABEL: test_crash:
 ; CHECK: # BB#0:
-; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; CHECK-NEXT: retq
 %shuf1 = shufflevector <4 x i8> %a, <4 x i8> zeroinitializer, <4 x i32><i32 4, i32 4, i32 2, i32 3>
 %shuf2 = shufflevector <4 x i8> %b, <4 x i8> zeroinitializer, <4 x i32><i32 0, i32 1, i32 4, i32 4>
diff --git a/llvm/test/CodeGen/X86/commute-blend-avx2.ll b/llvm/test/CodeGen/X86/commute-blend-avx2.ll
new file mode 100644
index 00000000000..d06c6dad8db
--- /dev/null
+++ b/llvm/test/CodeGen/X86/commute-blend-avx2.ll
@@ -0,0 +1,89 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=core-avx2 -mattr=avx2 < %s | FileCheck %s
+
+define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
+  %1 = load <8 x i16>* %b
+  %2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
+  ret <8 x i16> %2
+
+  ;LABEL: commute_fold_vpblendw_128
+  ;CHECK: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
+  ;CHECK-NEXT: retq
+}
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
+  %1 = load <16 x i16>* %b
+  %2 = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %1, <16 x i16> %a, i8 17)
+  ret <16 x i16> %2
+
+  ;LABEL: commute_fold_vpblendw_256
+  ;CHECK: vpblendw {{.*#+}} ymm0 = ymm0[0],mem[1,2,3],ymm0[4],mem[5,6,7],ymm0[8],mem[9,10,11],ymm0[12],mem[13,14,15]
+  ;CHECK-NEXT: retq
+}
+declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone
+
+define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
+  %1 = load <4 x i32>* %b
+  %2 = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %1, <4 x i32> %a, i8 1)
+  ret <4 x i32> %2
+
+  ;LABEL: commute_fold_vpblendd_128
+  ;CHECK: vpblendd {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
+  ;CHECK-NEXT: retq
+}
+declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind readnone
+
+define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
+  %1 = load <8 x i32>* %b
+  %2 = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %1, <8 x i32> %a, i8 129)
+  ret <8 x i32> %2
+
+  ;LABEL: commute_fold_vpblendd_256
+  ;CHECK: vpblendd {{.*#+}} ymm0 = ymm0[0],mem[1,2,3,4,5,6],ymm0[7]
+  ;CHECK-NEXT: retq
+}
+declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind readnone
+
+define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
+  %1 = load <4 x float>* %b
+  %2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
+  ret <4 x float> %2
+
+  ;LABEL: commute_fold_vblendps_128
+  ;CHECK: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+  ;CHECK-NEXT: retq
+}
+declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
+  %1 = load <8 x float>* %b
+  %2 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %1, <8 x float> %a, i8 7)
+  ret <8 x float> %2
+
+  ;LABEL: commute_fold_vblendps_256
+  ;CHECK: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],mem[3,4,5,6,7]
+  ;CHECK-NEXT: retq
+}
+declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
+  %1 = load <2 x double>* %b
+  %2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
+  ret <2 x double> %2
+
+  ;LABEL: commute_fold_vblendpd_128
+  ;CHECK: vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
+  ;CHECK-NEXT: retq
+}
+declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nounwind readnone
+
+define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
+  %1 = load <4 x double>* %b
+  %2 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %1, <4 x double> %a, i8 7)
+  ret <4 x double> %2
+
+  ;LABEL: commute_fold_vblendpd_256
+  ;CHECK: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],mem[3]
+  ;CHECK-NEXT: retq
+}
+declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/commute-blend-sse41.ll b/llvm/test/CodeGen/X86/commute-blend-sse41.ll
new file mode 100644
index 00000000000..59fef8c3a29
--- /dev/null
+++ b/llvm/test/CodeGen/X86/commute-blend-sse41.ll
@@ -0,0 +1,34 @@
+; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=corei7 < %s | FileCheck %s
+
+define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
+  %1 = load <8 x i16>* %b
+  %2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
+  ret <8 x i16> %2
+
+  ;LABEL: commute_fold_pblendw
+  ;CHECK: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2,3],xmm0[4],mem[5,6,7]
+  ;CHECK-NEXT: retq
+}
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
+
+define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
+  %1 = load <4 x float>* %b
+  %2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
+  ret <4 x float> %2
+
+  ;LABEL: commute_fold_blendps
+  ;CHECK: blendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
+  ;CHECK-NEXT: retq
+}
+declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+define <2 x double> @commute_fold_blendpd(<2 x double> %a, <2 x double>* %b) #0 {
+  %1 = load <2 x double>* %b
+  %2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
+  ret <2 x double> %2
+
+  ;LABEL: commute_fold_vblendpd
+  ;CHECK: blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
+  ;CHECK-NEXT: retq
+}
+declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/vector-blend.ll b/llvm/test/CodeGen/X86/vector-blend.ll
index b66ed708b0d..7022b71336f 100644
--- a/llvm/test/CodeGen/X86/vector-blend.ll
+++ b/llvm/test/CodeGen/X86/vector-blend.ll
@@ -202,8 +202,7 @@ define <8 x i16> @vsel_8xi16(<8 x i16> %v1, <8 x i16> %v2) {
 ;
 ; SSE41-LABEL: vsel_8xi16:
 ; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: vsel_8xi16:
@@ -518,10 +517,8 @@ define <8 x float> @constant_blendvps_avx(<8 x float> %xyzw, <8 x float> %abcd)
 ;
 ; SSE41-LABEL: constant_blendvps_avx:
 ; SSE41: # BB#0: # %entry
-; SSE41-NEXT: blendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3]
-; SSE41-NEXT: blendps {{.*#+}} xmm3 = xmm3[0,1,2],xmm1[3]
-; SSE41-NEXT: movaps %xmm2, %xmm0
-; SSE41-NEXT: movaps %xmm3, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: constant_blendvps_avx:
@@ -637,10 +634,8 @@ define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b)
 ;
 ; SSE41-LABEL: blend_shufflevector_8xfloat:
 ; SSE41: # BB#0: # %entry
-; SSE41-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
-; SSE41-NEXT: blendps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
-; SSE41-NEXT: movaps %xmm2, %xmm0
-; SSE41-NEXT: movaps %xmm3, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: blend_shufflevector_8xfloat:
@@ -694,8 +689,7 @@ define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
 ;
 ; SSE41-LABEL: blend_shufflevector_4xi64:
 ; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT: movaps %xmm3, %xmm1
 ; SSE41-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
index e743194ef70..6e11c0e1079 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -260,8 +260,7 @@ define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
 ;
 ; SSE41-LABEL: shuffle_v2f64_21:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: shuffle_v2f64_21:
@@ -508,8 +507,7 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
 ;
 ; SSE41-LABEL: shuffle_v2i64_21:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: shuffle_v2i64_21:
@@ -545,8 +543,8 @@ define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ;
 ; SSE41-LABEL: shuffle_v2i64_21_copy:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: shuffle_v2i64_21_copy:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 8ad9e5a11d9..446812df50e 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -369,7 +369,8 @@ define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pxor %xmm1, %xmm0
 ; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test3b:
@@ -411,8 +412,7 @@ define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; SSE41-LABEL: combine_bitwise_ops_test4b:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test4b:
@@ -452,8 +452,7 @@ define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; SSE41-LABEL: combine_bitwise_ops_test5b:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_bitwise_ops_test5b:
@@ -1170,8 +1169,7 @@ define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test2:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test2:
@@ -1237,8 +1235,7 @@ define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test5:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test5:
@@ -1299,8 +1296,7 @@ define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
 ;
 ; SSE41-LABEL: combine_test7:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_test7:
@@ -1371,8 +1367,7 @@ define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
 ;
 ; SSE41-LABEL: combine_test10:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_test10:
@@ -1415,8 +1410,7 @@ define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test12:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test12:
@@ -1479,8 +1473,7 @@ define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test15:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test15:
@@ -1518,8 +1511,7 @@ define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
 ;
 ; SSE41-LABEL: combine_test17:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_test17:
@@ -1587,8 +1579,7 @@ define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
 ;
 ; SSE41-LABEL: combine_test20:
 ; SSE41: # BB#0:
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: combine_test20:
@@ -1632,10 +1623,9 @@ define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test1b:
 ; SSE41: # BB#0:
-; SSE41-NEXT: movaps %xmm1, %xmm2
-; SSE41-NEXT: blendps {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,0]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,0]
 ; SSE41-NEXT: movaps %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -1673,11 +1663,9 @@ define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test2b:
 ; SSE41: # BB#0:
-; SSE41-NEXT: movaps %xmm1, %xmm2
-; SSE41-NEXT: blendps {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[1,1]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE41-NEXT: movaps %xmm2, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[1,1]
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test2b:
@@ -1736,10 +1724,9 @@ define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_test4b:
 ; SSE41: # BB#0:
-; SSE41-NEXT: movaps %xmm1, %xmm2
-; SSE41-NEXT: blendps {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
-; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0]
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[0,2]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[3,0]
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[0,2]
 ; SSE41-NEXT: movaps %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
@@ -2006,8 +1993,7 @@ define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_blend_01:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_blend_01:
@@ -2036,8 +2022,7 @@ define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_blend_02:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_blend_02:
@@ -2070,8 +2055,7 @@ define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_blend_123:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_blend_123:
@@ -2153,8 +2137,7 @@ define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_undef_input_test1:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_undef_input_test1:
@@ -2343,8 +2326,7 @@ define <4 x float> @combine_undef_input_test11(<4 x float> %b) {
 ;
 ; SSE41-LABEL: combine_undef_input_test11:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm1[0],xmm0[1]
-; SSE41-NEXT: movapd %xmm1, %xmm0
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_undef_input_test11:

