author    Chandler Carruth <chandlerc@gmail.com>  2014-10-03 21:38:49 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2014-10-03 21:38:49 +0000
commit    0adda1e4d47a836275fb0baabf98c402759f385f (patch)
tree      3ecf8726f61e3db0c845edfa0005d0031afbfbb3 /llvm/test/CodeGen
parent    0aca4b1aa0c4e912e1c01ce07e00f5647d1fc30e (diff)
[x86] Adjust the patterns for lowering X86vzmovl nodes which don't
perform a load to use blendps rather than movss when it is available.

For non-loads, blendps is *much* faster. It can execute on two ports in
Sandy Bridge and Ivy Bridge, and *three* ports on Haswell. This fixes
one of the "regressions" from aggressively taking the "insertion" path
in the new vector shuffle lowering.

This does highlight one problem with blendps -- it isn't commuted as
heavily as it should be. That's future work though.

llvm-svn: 219022
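For context, a minimal sketch (hypothetical IR, not taken from this commit) of
the kind of non-load X86vzmovl pattern being adjusted: a scalar lane inserted
into an otherwise-zero vector. With SSE4.1 available, this now lowers to
blendps, where immediate 1 selects lane 0 from the source and lanes 1-3 from
the zeroed register, instead of movss:

; Hypothetical test: keep lane 0 of %a, zero the remaining lanes.
define <4 x float> @insert_scalar_into_zero(<4 x float> %a) {
  %r = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %r
}

; Expected SSE4.1 codegen after this patch (compare the SSE41 CHECK lines
; for shuffle_v4f32_4zzz in the diff below):
;   xorps   %xmm1, %xmm1
;   blendps $1, %xmm0, %xmm1    ; xmm1 = xmm0[0],xmm1[1,2,3]
;   movaps  %xmm1, %xmm0
;   retq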
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/combine-or.ll             |   4
-rw-r--r--  llvm/test/CodeGen/X86/sse41.ll                  |  24
-rw-r--r--  llvm/test/CodeGen/X86/vec_set-3.ll              |   2
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll  | 208
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll  |   8
5 files changed, 186 insertions(+), 60 deletions(-)
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index 42a50b65905..c1f6c79e81a 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -228,9 +228,9 @@ define <4 x i32> @test18(<4 x i32> %a, <4 x i32> %b) {
; CHECK: # BB#0:
; CHECK-NEXT: xorps %xmm2, %xmm2
; CHECK-NEXT: xorps %xmm3, %xmm3
-; CHECK-NEXT: movss %xmm0, %xmm3
+; CHECK-NEXT: blendps $1, %xmm0, %xmm3
; CHECK-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[0,0]
-; CHECK-NEXT: movss %xmm1, %xmm2
+; CHECK-NEXT: blendps $1, %xmm1, %xmm2
; CHECK-NEXT: orps %xmm3, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm0
; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/sse41.ll b/llvm/test/CodeGen/X86/sse41.ll
index 3be2520ac00..c8e509ce69e 100644
--- a/llvm/test/CodeGen/X86/sse41.ll
+++ b/llvm/test/CodeGen/X86/sse41.ll
@@ -522,7 +522,7 @@ define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X00A:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: blendps $1, %xmm0, %xmm2
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
; X32-NEXT: movaps %xmm2, %xmm0
; X32-NEXT: retl
@@ -530,7 +530,7 @@ define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
; X64-LABEL: shuf_X00A:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: blendps $1, %xmm0, %xmm2
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: retq
@@ -546,7 +546,7 @@ define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X00X:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: movss %xmm0, %xmm1
+; X32-NEXT: blendps $1, %xmm0, %xmm1
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
@@ -554,7 +554,7 @@ define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
; X64-LABEL: shuf_X00X:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: movss %xmm0, %xmm1
+; X64-NEXT: blendps $1, %xmm0, %xmm1
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -570,7 +570,7 @@ define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
; X32-LABEL: shuf_X0YC:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: blendps $1, %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X32-NEXT: movaps %xmm2, %xmm0
@@ -579,7 +579,7 @@ define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
; X64-LABEL: shuf_X0YC:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: blendps $1, %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X64-NEXT: movaps %xmm2, %xmm0
@@ -692,7 +692,7 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00A:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: blendps $1, %xmm0, %xmm2
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
; X32-NEXT: movaps %xmm2, %xmm0
; X32-NEXT: retl
@@ -700,7 +700,7 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
; X64-LABEL: i32_shuf_X00A:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: blendps $1, %xmm0, %xmm2
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
; X64-NEXT: movaps %xmm2, %xmm0
; X64-NEXT: retq
@@ -716,7 +716,7 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X00X:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: movss %xmm0, %xmm1
+; X32-NEXT: blendps $1, %xmm0, %xmm1
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; X32-NEXT: movaps %xmm1, %xmm0
; X32-NEXT: retl
@@ -724,7 +724,7 @@ define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
; X64-LABEL: i32_shuf_X00X:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: movss %xmm0, %xmm1
+; X64-NEXT: blendps $1, %xmm0, %xmm1
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; X64-NEXT: movaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -740,7 +740,7 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; X32-LABEL: i32_shuf_X0YC:
; X32: ## BB#0:
; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: movss %xmm0, %xmm2
+; X32-NEXT: blendps $1, %xmm0, %xmm2
; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X32-NEXT: movaps %xmm2, %xmm0
@@ -749,7 +749,7 @@ define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
; X64-LABEL: i32_shuf_X0YC:
; X64: ## BB#0:
; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: movss %xmm0, %xmm2
+; X64-NEXT: blendps $1, %xmm0, %xmm2
; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1,0]
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
; X64-NEXT: movaps %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/vec_set-3.ll b/llvm/test/CodeGen/X86/vec_set-3.ll
index 043cf96a671..b38b8bfb81f 100644
--- a/llvm/test/CodeGen/X86/vec_set-3.ll
+++ b/llvm/test/CodeGen/X86/vec_set-3.ll
@@ -39,7 +39,7 @@ entry:
define <4 x float> @test3(<4 x float> %A) {
; CHECK-LABEL: test3:
; CHECK: xorps %[[X1:xmm[0-9]+]], %[[X1]]
-; CHECK-NEXT: movss %xmm0, %[[X1]]
+; CHECK-NEXT: blendps $1, %xmm0, %[[X1]]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = [[X1]][1,0,1,1]
; CHECK-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index f6ba5db85f3..019988b3762 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -438,17 +438,38 @@ define <4 x i32> @shuffle_v4i32_4015(<4 x i32> %a, <4 x i32> %b) {
}
define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
-; SSE-LABEL: shuffle_v4f32_4zzz:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4f32_4zzz:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4f32_4zzz:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4f32_4zzz:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4f32_4zzz:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4f32_4zzz:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x float> %shuffle
@@ -639,34 +660,76 @@ define <4 x float> @shuffle_v4f32_z6zz(<4 x float> %a) {
}
define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
-; SSE-LABEL: shuffle_v4i32_4zzz:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_4zzz:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_4zzz:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_4zzz:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_4zzz:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_4zzz:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
ret <4 x i32> %shuffle
}
define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
-; SSE-LABEL: shuffle_v4i32_z4zz:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_z4zz:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_z4zz:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_z4zz:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_z4zz:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,1,1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_z4zz:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 2, i32 4, i32 3, i32 0>
@@ -674,17 +737,38 @@ define <4 x i32> @shuffle_v4i32_z4zz(<4 x i32> %a) {
}
define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
-; SSE-LABEL: shuffle_v4i32_zz4z:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_zz4z:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_zz4z:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_zz4z:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_zz4z:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,1]
+; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_zz4z:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,1]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 0, i32 4, i32 0>
@@ -692,17 +776,38 @@ define <4 x i32> @shuffle_v4i32_zz4z(<4 x i32> %a) {
}
define <4 x i32> @shuffle_v4i32_zuu4(<4 x i32> %a) {
-; SSE-LABEL: shuffle_v4i32_zuu4:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
-; SSE-NEXT: retq
+; SSE2-LABEL: shuffle_v4i32_zuu4:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_v4i32_zuu4:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_v4i32_zuu4:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_v4i32_zuu4:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,0]
+; SSE41-NEXT: retq
;
; AVX-LABEL: shuffle_v4i32_zuu4:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 0, i32 undef, i32 undef, i32 4>
@@ -1031,12 +1136,33 @@ define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
}
define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
-; SSE-LABEL: insert_reg_and_zero_v4f32:
-; SSE: # BB#0:
-; SSE-NEXT: xorps %xmm1, %xmm1
-; SSE-NEXT: movss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_reg_and_zero_v4f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movss %xmm0, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_reg_and_zero_v4f32:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorps %xmm1, %xmm1
+; SSE3-NEXT: movss %xmm0, %xmm1
+; SSE3-NEXT: movaps %xmm1, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_reg_and_zero_v4f32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: movss %xmm0, %xmm1
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_reg_and_zero_v4f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: insert_reg_and_zero_v4f32:
; AVX: # BB#0:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 595447775b5..32ee62fa985 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -678,8 +678,8 @@ define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
; AVX1-LABEL: insert_reg_and_zero_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovq %rdi, %xmm0
-; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_and_zero_v4i64:
@@ -697,8 +697,8 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
; AVX1-LABEL: insert_mem_and_zero_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovq (%rdi), %xmm0
-; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vmovsd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_mem_and_zero_v4i64: