-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp          55
-rw-r--r--  llvm/test/CodeGen/X86/combine-sra.ll             56
-rw-r--r--  llvm/test/CodeGen/X86/combine-srl.ll             28
-rw-r--r--  llvm/test/CodeGen/X86/combine-udiv.ll            84
-rw-r--r--  llvm/test/CodeGen/X86/combine-urem.ll            28
-rw-r--r--  llvm/test/CodeGen/X86/vector-rotate-128.ll      266
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-128.ll  120
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-128.ll  120
8 files changed, 356 insertions(+), 401 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 778909be90f..b48780dbed9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23453,22 +23453,45 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
break;
}
// The SSE2 shifts use the lower i64 as the same shift amount for
- // all lanes and the upper i64 is ignored. These shuffle masks
- // optimally zero-extend each lanes on SSE2/SSE41/AVX targets.
- SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
- Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
- Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
- Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
- Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
- }
-
- SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
- SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
- SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2);
- SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3);
- SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
- SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
- return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
+ // all lanes and the upper i64 is ignored. On AVX we're better off
+ // just zero-extending, but for SSE just duplicating the top 16-bits is
+ // cheaper and has the same effect for out of range values.
+ if (Subtarget.hasAVX()) {
+ SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
+ Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
+ Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
+ Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
+ Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
+ } else {
+ SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
+ SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
+ {4, 5, 6, 7, -1, -1, -1, -1});
+ Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
+ {0, 1, 1, 1, -1, -1, -1, -1});
+ Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
+ {2, 3, 3, 3, -1, -1, -1, -1});
+ Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
+ {0, 1, 1, 1, -1, -1, -1, -1});
+ Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
+ {2, 3, 3, 3, -1, -1, -1, -1});
+ }
+ }
+
+ SDValue R0 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt0));
+ SDValue R1 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt1));
+ SDValue R2 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt2));
+ SDValue R3 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt3));
+
+ // Merge the shifted lane results optimally with/without PBLENDW.
+ // TODO - ideally shuffle combining would handle this.
+ if (Subtarget.hasSSE41()) {
+ SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
+ SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
+ return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
+ }
+ SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
+ SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
+ return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
}
// It's worth extending once and using the vXi16/vXi32 shifts for smaller
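The following is a minimal intrinsics sketch (my illustration, not part of the patch; the function name and merge ordering are my own) of the per-lane sequence the non-AVX paths now produce for a variable v4i32 logical right shift: each 32-bit amount is moved into the low i64 of a register with PSHUFLW, duplicating its upper 16 bits instead of zero-extending, which is harmless because any amount with those bits set is already out of range and shifts the lane to zero anyway. The four PSRLD results are then merged with the pre-SSE4.1 unpack+SHUFPS pattern from the patch.

#include <emmintrin.h>

static __m128i var_lshr_v4i32_sse2(__m128i v, __m128i amt) {
  // pshufd {2,3,0,1}: bring shift amounts for lanes 2 and 3 into the low half.
  __m128i hi = _mm_shuffle_epi32(amt, _MM_SHUFFLE(1, 0, 3, 2));
  // pshuflw {0,1,1,1} / {2,3,3,3}: place one lane's 32-bit amount in the low
  // i64; the duplicated upper word only matters for out-of-range amounts.
  __m128i a0 = _mm_shufflelo_epi16(amt, _MM_SHUFFLE(1, 1, 1, 0)); // lane 0
  __m128i a1 = _mm_shufflelo_epi16(amt, _MM_SHUFFLE(3, 3, 3, 2)); // lane 1
  __m128i a2 = _mm_shufflelo_epi16(hi,  _MM_SHUFFLE(1, 1, 1, 0)); // lane 2
  __m128i a3 = _mm_shufflelo_epi16(hi,  _MM_SHUFFLE(3, 3, 3, 2)); // lane 3
  // psrld with a per-call count register; only one lane of each result is kept.
  __m128i r0 = _mm_srl_epi32(v, a0);
  __m128i r1 = _mm_srl_epi32(v, a1);
  __m128i r2 = _mm_srl_epi32(v, a2);
  __m128i r3 = _mm_srl_epi32(v, a3);
  // Non-PBLENDW merge: r01 = {r0[0], r0[1], r1[0], r1[1]},
  // r23 = {r2[2], r2[3], r3[2], r3[3]}, then shufps picks elements 0 and 3 of
  // each pair, yielding {r0[0], r1[1], r2[2], r3[3]}.
  __m128i r01 = _mm_unpacklo_epi64(r0, r1);
  __m128i r23 = _mm_unpackhi_epi64(r2, r3);
  return _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(r01), _mm_castsi128_ps(r23), _MM_SHUFFLE(3, 0, 3, 0)));
}

On SSE4.1 targets the same four PSRLD results are instead combined with two PBLENDW pairs plus a final dword blend, as in the R02/R13 path above.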
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index a8226e435ef..7231da5134d 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -150,23 +150,21 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm2, %xmm3
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrad %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad %xmm1, %xmm2
-; SSE-NEXT: psrad %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrad %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrad %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrad %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_and:
@@ -275,23 +273,21 @@ define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive:
; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrld %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive:
diff --git a/llvm/test/CodeGen/X86/combine-srl.ll b/llvm/test/CodeGen/X86/combine-srl.ll
index 2c99e941f5e..9bd0be073f6 100644
--- a/llvm/test/CodeGen/X86/combine-srl.ll
+++ b/llvm/test/CodeGen/X86/combine-srl.ll
@@ -425,23 +425,21 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movaps %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrld %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_and:
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 6b60065debd..3b9b6617a74 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -130,23 +130,21 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
define <4 x i32> @combine_vec_udiv_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_pow2c:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrld %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_pow2c:
@@ -179,23 +177,21 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2a:
; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrld %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_shl_pow2a:
@@ -230,23 +226,21 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2b:
; SSE: # %bb.0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psrld %xmm4, %xmm5
+; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_udiv_by_shl_pow2b:
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index c698e8b7236..91b853979f9 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -159,24 +159,22 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_pow2d:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: psrld %xmm2, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrld %xmm1, %xmm2
-; SSE-NEXT: psrld %xmm4, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3],xmm3[4,5],xmm5[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: psrld %xmm5, %xmm6
+; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrld %xmm1, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT: psrld %xmm1, %xmm3
+; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5],xmm6[6,7]
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: pand %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 7ea470dfea3..166b75f3310 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -129,32 +129,28 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrld %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm1, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld %xmm5, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,3,2,3]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm5, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrld %xmm2, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: orps %xmm4, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v4i32:
@@ -165,23 +161,21 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: psrld %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlq $32, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrld %xmm3, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld %xmm2, %xmm3
-; SSE41-NEXT: psrld %xmm4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrld %xmm5, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm2, %xmm4
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -245,32 +239,28 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm0, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm3, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm3, %xmm4
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psrlq $32, %xmm3
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrld %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm3
+; X32-SSE-NEXT: psrld %xmm1, %xmm3
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrld %xmm5, %xmm1
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm5
; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,3,2,3]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: movdqa %xmm2, %xmm5
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm5, %xmm6
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: psrld %xmm2, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE-NEXT: por %xmm1, %xmm0
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; X32-SSE-NEXT: orps %xmm4, %xmm1
+; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: retl
%b32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
%shl = shl <4 x i32> %a, %b
@@ -910,35 +900,30 @@ define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: xorps %xmm3, %xmm3
-; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
-; SSE2-NEXT: psubd %xmm1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
+; SSE2-NEXT: psubd %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pslld %xmm2, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm1, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pslld %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: psrlq $32, %xmm3
+; SSE2-NEXT: psrld %xmm5, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm3, %xmm6
-; SSE2-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,3,2,3]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: psrld %xmm5, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: psrld %xmm4, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm2, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; SSE2-NEXT: orps %xmm4, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v4i32:
@@ -949,23 +934,21 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE41-NEXT: psubd %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pslld %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: psrld %xmm2, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrld %xmm2, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld %xmm3, %xmm2
-; SSE41-NEXT: psrld %xmm4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrld %xmm5, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrld %xmm3, %xmm4
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -1031,35 +1014,30 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X32-SSE-LABEL: splatvar_rotate_v4i32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: xorps %xmm3, %xmm3
-; X32-SSE-NEXT: movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
+; X32-SSE-NEXT: xorps %xmm2, %xmm2
+; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [32,32,32,32]
-; X32-SSE-NEXT: psubd %xmm1, %xmm4
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
+; X32-SSE-NEXT: psubd %xmm1, %xmm3
+; X32-SSE-NEXT: movdqa %xmm0, %xmm4
+; X32-SSE-NEXT: pslld %xmm2, %xmm4
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: psrld %xmm1, %xmm2
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: pslld %xmm3, %xmm1
-; X32-SSE-NEXT: movdqa %xmm4, %xmm3
-; X32-SSE-NEXT: psrlq $32, %xmm3
+; X32-SSE-NEXT: psrld %xmm5, %xmm1
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm5
; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: movdqa %xmm4, %xmm3
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm3, %xmm6
-; X32-SSE-NEXT: movsd {{.*#+}} xmm6 = xmm5[0],xmm6[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,3,2,3]
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm6
-; X32-SSE-NEXT: psrld %xmm5, %xmm6
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; X32-SSE-NEXT: psrld %xmm4, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE-NEXT: por %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: psrld %xmm2, %xmm0
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
+; X32-SSE-NEXT: orps %xmm4, %xmm1
+; X32-SSE-NEXT: movaps %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
%splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
@@ -1673,18 +1651,16 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $27, %xmm1
+; SSE2-NEXT: psrld $25, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld $25, %xmm3
-; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld $28, %xmm3
-; SSE2-NEXT: psrld $26, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psrld $26, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $27, %xmm1
+; SSE2-NEXT: psrld $28, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v4i32:
@@ -1755,18 +1731,16 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $27, %xmm1
+; X32-SSE-NEXT: psrld $25, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld $25, %xmm3
-; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm1[0],xmm3[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld $28, %xmm3
-; X32-SSE-NEXT: psrld $26, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE-NEXT: por %xmm2, %xmm0
+; X32-SSE-NEXT: psrld $26, %xmm3
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrld $27, %xmm1
+; X32-SSE-NEXT: psrld $28, %xmm0
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; X32-SSE-NEXT: orps %xmm2, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
%lshr = lshr <4 x i32> %a, <i32 28, i32 27, i32 26, i32 25>
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 82a42c94a84..a46d043219c 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -116,47 +116,41 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrad %xmm2, %xmm4
-; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrad %xmm4, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrad %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrad %xmm1, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrad %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrad %xmm2, %xmm4
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrad %xmm1, %xmm2
-; SSE41-NEXT: psrad %xmm3, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrad %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrad %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
@@ -204,26 +198,22 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psrlq $32, %xmm2
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: psrad %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: psrad %xmm4, %xmm2
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrad %xmm2, %xmm4
-; X32-SSE-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: psrad %xmm4, %xmm5
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X32-SSE-NEXT: psrad %xmm3, %xmm4
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: psrad %xmm1, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; X32-SSE-NEXT: movaps %xmm2, %xmm0
; X32-SSE-NEXT: retl
%shift = ashr <4 x i32> %a, %b
ret <4 x i32> %shift
@@ -1161,17 +1151,15 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrad $5, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrad $7, %xmm2
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrad $4, %xmm2
-; SSE2-NEXT: psrad $6, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: psrad $6, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $5, %xmm1
+; SSE2-NEXT: psrad $4, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
@@ -1227,17 +1215,15 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrad $5, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrad $7, %xmm2
-; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X32-SSE-NEXT: psrad $7, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrad $4, %xmm2
-; X32-SSE-NEXT: psrad $6, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: psrad $6, %xmm2
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrad $5, %xmm1
+; X32-SSE-NEXT: psrad $4, %xmm0
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; X32-SSE-NEXT: retl
%shift = ashr <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
ret <4 x i32> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 8d1760e1b3f..d4a9ec10580 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -86,47 +86,41 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_shift_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrlq $32, %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrld %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrld %xmm2, %xmm4
-; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrld %xmm4, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE2-NEXT: psrld %xmm1, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psrld %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrld %xmm2, %xmm4
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld %xmm1, %xmm2
-; SSE41-NEXT: psrld %xmm3, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrld %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_shift_v4i32:
@@ -174,26 +168,22 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psrlq $32, %xmm2
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: psrld %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: psrld %xmm4, %xmm2
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrld %xmm2, %xmm4
-; X32-SSE-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,3,2,3]
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: psrld %xmm4, %xmm5
-; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X32-SSE-NEXT: psrld %xmm3, %xmm4
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; X32-SSE-NEXT: psrld %xmm1, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; X32-SSE-NEXT: movaps %xmm2, %xmm0
; X32-SSE-NEXT: retl
%shift = lshr <4 x i32> %a, %b
ret <4 x i32> %shift
@@ -921,17 +911,15 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_shift_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $5, %xmm1
+; SSE2-NEXT: psrld $7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld $7, %xmm2
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld $4, %xmm2
-; SSE2-NEXT: psrld $6, %xmm0
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: psrld $6, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $5, %xmm1
+; SSE2-NEXT: psrld $4, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_shift_v4i32:
@@ -987,17 +975,15 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $5, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrld $7, %xmm2
-; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X32-SSE-NEXT: psrld $7, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrld $4, %xmm2
-; X32-SSE-NEXT: psrld $6, %xmm0
-; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: psrld $6, %xmm2
+; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: psrld $5, %xmm1
+; X32-SSE-NEXT: psrld $4, %xmm0
+; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; X32-SSE-NEXT: retl
%shift = lshr <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
ret <4 x i32> %shift