| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-12-02 22:31:23 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-12-02 22:31:23 +0000 |
| commit | 6b988ad8f2285e8bb3c56db38c0416f6186d3889 | |
| tree | 4d91ac43a414d292c08e53221c42bd662eff0f2d | |
| parent | 656c29b08f1d32071b63d413a209cd418b5240fa | |
[X86][SSE] Keep 4i32 vector insertions in integer domain on SSE4.1 targets
4i32 shuffles that perform a single insertion into a zero vector lower to X86vzmovl, which was being lowered with (v)blendps, causing domain-switch stalls in integer code. This patch fixes this by using (v)pblendw instead, keeping the operation in the integer domain. Since (v)pblendw blends 16-bit lanes rather than 32-bit ones, the blend immediate changes from 1 (the low dword) to 3 (the two low words), selecting the same bits.
The updated tests in test/CodeGen/X86/sse41.ll still contain a domain stall due to the use of insertps; I'm looking at fixing this in a future patch.
Differential Revision: http://reviews.llvm.org/D6458
llvm-svn: 223165
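For illustration, the shuffle shape this patch targets can be reduced to the following IR. This is a minimal sketch: the function name is made up, but shuffle_v4i32_4zzz in test/CodeGen/X86/vector-shuffle-128-v4.ll below is the in-tree equivalent, and the before/after assembly in the comments is taken from that test's updated checks.

```llvm
; Insert element 0 of %a into a zero vector; this single-insertion
; shuffle lowers to an X86vzmovl node.
;
; SSE4.1 codegen before this patch (float domain):
;   xorps %xmm1, %xmm1
;   blendps     ; xmm0 = xmm0[0],xmm1[1,2,3]
; SSE4.1 codegen after this patch (integer domain):
;   pxor %xmm1, %xmm1
;   pblendw     ; xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
define <4 x i32> @insert_lane0_into_zero(<4 x i32> %a) {
  %shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x i32> %shuffle
}
```

With the change, the whole zero-and-blend sequence stays in the integer domain, so no bypass-delay penalty is incurred between the zeroing idiom, the blend, and surrounding integer operations.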
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 4 |
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-and.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/X86/combine-or.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/sse41.ll | 16 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll | 24 |
5 files changed, 26 insertions, 26 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 1b07e874837..0bf765b53dd 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -7734,7 +7734,7 @@ let Predicates = [UseAVX] in {
   def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
             (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
   def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
-            (VBLENDPSrri (v4i32 (V_SET0)), VR128:$src, (i8 1))>;
+            (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
   def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
             (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
@@ -7769,7 +7769,7 @@ let Predicates = [UseSSE41] in {
   def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
             (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
   def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
-            (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
+            (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
   def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
             (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
 }
diff --git a/llvm/test/CodeGen/X86/combine-and.ll b/llvm/test/CodeGen/X86/combine-and.ll
index 59a7a1902aa..dace806b4bb 100644
--- a/llvm/test/CodeGen/X86/combine-and.ll
+++ b/llvm/test/CodeGen/X86/combine-and.ll
@@ -10,7 +10,7 @@ define <4 x i32> @test1(<4 x i32> %A) {
   ret <4 x i32> %1
 }
 ; CHECK-LABEL: test1
-; CHECK: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index f10092a1378..c70067f389a 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -223,10 +223,10 @@ define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
 define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test18:
 ; CHECK: # BB#0:
-; CHECK-NEXT: xorps %xmm2, %xmm2
-; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; CHECK-NEXT: pxor %xmm2, %xmm2
+; CHECK-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; CHECK-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
 ; CHECK-NEXT: por %xmm1, %xmm0
 ; CHECK-NEXT: retq
   %shuf1 = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32><i32 4, i32 0, i32 4, i32 4>
diff --git a/llvm/test/CodeGen/X86/sse41.ll b/llvm/test/CodeGen/X86/sse41.ll
index 71c1c9ed33c..9c0c2221cb7 100644
--- a/llvm/test/CodeGen/X86/sse41.ll
+++ b/llvm/test/CodeGen/X86/sse41.ll
@@ -695,15 +695,15 @@ define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
 define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_X00A:
 ; X32: ## BB#0:
-; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; X32-NEXT: pxor %xmm2, %xmm2
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_X00A:
 ; X64: ## BB#0:
-; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; X64-NEXT: pxor %xmm2, %xmm2
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-NEXT: retq
   %vecext = extractelement <4 x i32> %x, i32 0
@@ -717,16 +717,16 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
 define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_X00X:
 ; X32: ## BB#0:
-; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; X32-NEXT: pxor %xmm1, %xmm1
+; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
 ; X32-NEXT: movaps %xmm1, %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_X00X:
 ; X64: ## BB#0:
-; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; X64-NEXT: pxor %xmm1, %xmm1
+; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
 ; X64-NEXT: movaps %xmm1, %xmm0
 ; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 38fb3fb86a3..1b85b4e7670 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -681,14 +681,14 @@ define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
 ;
 ; SSE41-LABEL: shuffle_v4i32_4zzz:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: shuffle_v4i32_4zzz:
 ; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX-NEXT: retq
   %shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
   ret <4 x i32> %shuffle
@@ -718,15 +718,15 @@ define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
 ;
 ; SSE41-LABEL: shuffle_v4i32_z4zz:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: shuffle_v4i32_z4zz:
 ; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
 ; AVX-NEXT: retq
   %shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0>
@@ -757,15 +757,15 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
 ;
 ; SSE41-LABEL: shuffle_v4i32_zz4z:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: shuffle_v4i32_zz4z:
 ; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
 ; AVX-NEXT: retq
   %shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0>
```
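On the remaining insertps stall mentioned in the commit message: in shapes like sse41.ll's i32_shuf_X00A, the zeroing blend is now the integer-domain pxor + pblendw, but the final element insertion still selects the float-domain insertps, leaving one domain crossing. A sketch of that shape follows (hypothetical function name; the IR body is reconstructed to match the test's semantics, not copied verbatim from the file):

```llvm
; Lane 0 comes from %x, lanes 1-2 are zero, lane 3 comes from %a.
; Per the updated checks above, SSE4.1 now emits:
;   pxor + pblendw   (integer domain)
;   insertps         (float domain)  <- the residual domain switch
define <4 x i32> @i32_shuf_X00A_sketch(<4 x i32> %x, <4 x i32> %a) {
  %vecext = extractelement <4 x i32> %x, i32 0
  %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
  %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
  %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
  %vecinit3 = shufflevector <4 x i32> %vecinit2, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x i32> %vecinit3
}
```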

