-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp      6
-rw-r--r--   llvm/test/CodeGen/X86/combine-addo.ll       36
-rw-r--r--   llvm/test/CodeGen/X86/combine-mulo.ll       24
-rw-r--r--   llvm/test/CodeGen/X86/combine-subo.ll       39
-rw-r--r--   llvm/test/CodeGen/X86/nontemporal-loads.ll  48
-rw-r--r--   llvm/test/CodeGen/X86/vsel-cmp-load.ll       8
6 files changed, 68 insertions(+), 93 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fa18985e4f8..afb76014dff 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34684,6 +34684,12 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
     return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
   }
 
+  // Commute LHS/RHS if the Cond has been XOR'd.
+  // TODO: Move this to DAGCombine.
+  if (CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
+      isBitwiseNot(Cond))
+    return DAG.getNode(N->getOpcode(), DL, VT, Cond.getOperand(0), RHS, LHS);
+
   // If we have SSE[12] support, try to form min/max nodes. SSE min/max
   // instructions match the semantics of the common C idiom x<y?x:y but not
   // x<=y?x:y, because of how they handle negative zero (which can be
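
The new combine rewrites select(~Cond, X, Y) as select(Cond, Y, X) whenever the condition is a bitwise NOT whose elements are the same width as the result, so the blends in the tests below no longer need to invert their mask first. Below is a minimal standalone sanity check of that equivalence for the SSE4.1 blendvps used throughout the tests; it is not part of the patch, and the file name and build line are illustrative only.

    // Standalone sanity check (not LLVM code): for SSE4.1 blends,
    //   blendv(a, b, NOT m) == blendv(b, a, m)
    // i.e. inverting the mask is the same as swapping the two sources,
    // which is the select commute the patch performs.
    // Build (illustrative): g++ -O2 -msse4.1 blendv_check.cpp && ./a.out
    #include <immintrin.h>
    #include <cassert>
    #include <cstring>

    int main() {
      __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
      __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
      // Compare-style mask: all-ones in lanes 0 and 2, zero elsewhere.
      __m128 m = _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1));
      // Vector NOT of the mask (xor with all-ones).
      __m128 nm = _mm_xor_ps(m, _mm_castsi128_ps(_mm_set1_epi32(-1)));

      // _mm_blendv_ps takes its second source where the mask sign bit is set,
      // so select(c, X, Y) corresponds to _mm_blendv_ps(Y, X, c).
      __m128 s0 = _mm_blendv_ps(a, b, nm); // select(NOT m, b, a)
      __m128 s1 = _mm_blendv_ps(b, a, m);  // select(m, a, b)

      float x[4], y[4];
      _mm_storeu_ps(x, s0);
      _mm_storeu_ps(y, s1);
      assert(std::memcmp(x, y, sizeof(x)) == 0); // identical results
      return 0;
    }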
diff --git a/llvm/test/CodeGen/X86/combine-addo.ll b/llvm/test/CodeGen/X86/combine-addo.ll
index 9b408eb8359..703521ad028 100644
--- a/llvm/test/CodeGen/X86/combine-addo.ll
+++ b/llvm/test/CodeGen/X86/combine-addo.ll
@@ -87,19 +87,15 @@ define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmaxud %xmm0, %xmm0
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_uadd_zero:
; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm0, %xmm0, %xmm2
; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
@@ -134,26 +130,24 @@ define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_uadd_not:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psubd %xmm3, %xmm2
-; SSE-NEXT: pmaxud %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psubd %xmm2, %xmm3
+; SSE-NEXT: pmaxud %xmm3, %xmm0
+; SSE-NEXT: pcmpeqd %xmm3, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm3, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_uadd_not:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsubd %xmm2, %xmm0, %xmm3
-; AVX-NEXT: vpmaxud %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpcmpeqd %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT: vpsubd %xmm2, %xmm0, %xmm2
+; AVX-NEXT: vpmaxud %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = xor <4 x i32> %a0, <i32 -1, i32 -1, i32 -1, i32 -1>
%2 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
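
A note on the instruction pairs being deleted in these tests (the same pattern repeats in every file below): pcmpeqd of a register with itself yields all-ones, and pxor with all-ones flips every bit, so each removed pcmpeqd/pxor pair was materializing a vector NOT of the compare mask. An intrinsics sketch of that idiom, illustrative rather than taken from the patch:

    #include <immintrin.h>

    // The SSE "vector NOT" idiom the new combine renders unnecessary:
    //   pcmpeqd x, x   ; all-ones (every lane compares equal to itself)
    //   pxor    v, x   ; flips every bit of v
    static inline __m128i vec_not(__m128i v) {
      __m128i ones = _mm_cmpeq_epi32(v, v); // all bits set
      return _mm_xor_si128(v, ones);        // bitwise NOT of v
    }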
diff --git a/llvm/test/CodeGen/X86/combine-mulo.ll b/llvm/test/CodeGen/X86/combine-mulo.ll
index 045e6595ed2..e34f5d84ff4 100644
--- a/llvm/test/CodeGen/X86/combine-mulo.ll
+++ b/llvm/test/CodeGen/X86/combine-mulo.ll
@@ -44,10 +44,8 @@ define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_smul_two:
@@ -61,9 +59,7 @@ define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm3
; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
@@ -104,13 +100,11 @@ define <4 x i32> @combine_vec_umul_two(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: pmuludq %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: pcmpeqd %xmm3, %xmm0
; SSE-NEXT: paddd %xmm2, %xmm2
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_umul_two:
@@ -123,10 +117,8 @@ define <4 x i32> @combine_vec_umul_two(<4 x i32> %a0, <4 x i32> %a1) {
; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
diff --git a/llvm/test/CodeGen/X86/combine-subo.ll b/llvm/test/CodeGen/X86/combine-subo.ll
index 7673edb9ebb..15b01985bdb 100644
--- a/llvm/test/CodeGen/X86/combine-subo.ll
+++ b/llvm/test/CodeGen/X86/combine-subo.ll
@@ -90,19 +90,15 @@ define <4 x i32> @combine_vec_usub_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pminud %xmm0, %xmm0
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_usub_zero:
; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm0, %xmm0, %xmm2
; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
@@ -175,10 +171,8 @@ define <4 x i32> @combine_vec_usub_self(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-NEXT: psubd %xmm0, %xmm2
; SSE-NEXT: pminud %xmm2, %xmm0
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_usub_self:
@@ -186,9 +180,7 @@ define <4 x i32> @combine_vec_usub_self(<4 x i32> %a0, <4 x i32> %a1) {
; AVX-NEXT: vpsubd %xmm0, %xmm0, %xmm2
; AVX-NEXT: vpminud %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
@@ -221,24 +213,21 @@ define <4 x i32> @combine_vec_usub_negone(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_usub_negone:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pminud %xmm3, %xmm0
+; SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE-NEXT: pxor %xmm0, %xmm2
+; SSE-NEXT: pminud %xmm2, %xmm0
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: blendvps %xmm0, %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_usub_negone:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpminud %xmm2, %xmm0, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpxor %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpminud %xmm2, %xmm0, %xmm2
+; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a0)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
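
For context on the psubd/pminud/pcmpeqd sequences in the subtraction tests above: unsigned a - b borrows exactly when the wrapped result exceeds a, so min(a - b, a) == a - b holds precisely when there is no overflow. Before this patch the compare result then had to be inverted (the deleted pxor) to obtain the overflow mask; now the select arms are swapped instead. A scalar model of the check, illustrative only:

    #include <cassert>
    #include <cstdint>

    // Scalar model of the vector sequence psubd + pminud + pcmpeqd:
    //   no borrow  <=>  a - b <= a  <=>  min(a - b, a) == a - b
    static bool usub_overflow(uint32_t a, uint32_t b) {
      uint32_t sub = a - b;            // psubd (wrapping)
      uint32_t mn = sub < a ? sub : a; // pminud
      return mn != sub;                // overflow = NOT(pcmpeqd result)
    }

    int main() {
      assert(!usub_overflow(5, 3)); // 5 - 3 = 2, no borrow
      assert(usub_overflow(3, 5));  // 3 - 5 wraps, borrow
      assert(!usub_overflow(7, 7)); // a - a never borrows
      return 0;
    }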
diff --git a/llvm/test/CodeGen/X86/nontemporal-loads.ll b/llvm/test/CodeGen/X86/nontemporal-loads.ll
index 4b9e39e5731..8f0118d39bd 100644
--- a/llvm/test/CodeGen/X86/nontemporal-loads.ll
+++ b/llvm/test/CodeGen/X86/nontemporal-loads.ll
@@ -1821,29 +1821,30 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
;
; SSE41-LABEL: test_masked_v16i32:
; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm7, %xmm9
+; SSE41-NEXT: movdqa %xmm6, %xmm10
+; SSE41-NEXT: movdqa %xmm5, %xmm11
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm9, %xmm9
-; SSE41-NEXT: pxor %xmm9, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pxor %xmm9, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT: pxor %xmm9, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movntdqa 48(%rdi), %xmm9
-; SSE41-NEXT: movntdqa 32(%rdi), %xmm10
-; SSE41-NEXT: movntdqa 16(%rdi), %xmm11
-; SSE41-NEXT: movntdqa (%rdi), %xmm4
-; SSE41-NEXT: blendvps %xmm0, %xmm4, %xmm8
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: blendvps %xmm0, %xmm11, %xmm1
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: blendvps %xmm0, %xmm10, %xmm2
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: blendvps %xmm0, %xmm9, %xmm3
-; SSE41-NEXT: movaps %xmm8, %xmm0
+; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
+; SSE41-NEXT: movntdqa 32(%rdi), %xmm7
+; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
+; SSE41-NEXT: movntdqa (%rdi), %xmm5
+; SSE41-NEXT: blendvps %xmm0, %xmm8, %xmm5
+; SSE41-NEXT: movdqa %xmm11, %xmm0
+; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm10, %xmm0
+; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm7
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: blendvps %xmm0, %xmm3, %xmm4
+; SSE41-NEXT: movaps %xmm5, %xmm0
+; SSE41-NEXT: movaps %xmm6, %xmm1
+; SSE41-NEXT: movaps %xmm7, %xmm2
+; SSE41-NEXT: movaps %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_masked_v16i32:
@@ -1876,14 +1877,11 @@ define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %m
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
-; AVX2-NEXT: vpxor %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpxor %ymm5, %ymm2, %ymm2
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm4
-; AVX2-NEXT: vblendvps %ymm3, %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
-; AVX2-NEXT: vblendvps %ymm2, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_masked_v16i32:
diff --git a/llvm/test/CodeGen/X86/vsel-cmp-load.ll b/llvm/test/CodeGen/X86/vsel-cmp-load.ll
index cbbbd6300fc..43b975caf0a 100644
--- a/llvm/test/CodeGen/X86/vsel-cmp-load.ll
+++ b/llvm/test/CodeGen/X86/vsel-cmp-load.ll
@@ -58,9 +58,7 @@ define <4 x i64> @ne_zero(<4 x i16>* %p, <4 x i64> %x, <4 x i64> %y) {
; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ne_zero:
@@ -192,9 +190,7 @@ define <8 x float> @ne_zero_fp_select(<8 x i8>* %p, <8 x float> %x, <8 x float>
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: ne_zero_fp_select: