-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp          |  9 +++++++++
-rw-r--r--  llvm/test/CodeGen/X86/vector-compare-simplify.ll | 11 +++++++++++
2 files changed, 20 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ba956202481..5dd0b9ea189 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -18099,6 +18099,15 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
     }
   }
 
+  // If this is a SETNE against the signed minimum value, change it to SETGT.
+  // Otherwise we use PCMPEQ+invert.
+  APInt ConstValue;
+  if (Cond == ISD::SETNE &&
+      ISD::isConstantSplatVector(Op1.getNode(), ConstValue) &&
+      ConstValue.isMinSignedValue()) {
+    Cond = ISD::SETGT;
+  }
+
   // If both operands are known non-negative, then an unsigned compare is the
   // same as a signed compare and there's no need to flip signbits.
   // TODO: We could check for more general simplifications here since we're
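
The combine is justified because INT_MIN is the smallest signed value, so the predicates x != INT_MIN and x > INT_MIN (signed) select exactly the same values, and SSE has a signed greater-than compare (PCMPGT) but no not-equal compare. A minimal standalone C++ sketch (not part of the patch) checking that identity at the boundary values:

// Standalone sketch, not LLVM code: verifies the identity the combine relies
// on, namely that for 32-bit signed integers "x != INT_MIN" holds for exactly
// the same values as the signed comparison "x > INT_MIN".
#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const int32_t Min = std::numeric_limits<int32_t>::min();
  const int32_t Probes[] = {Min, Min + 1, -1, 0, 1,
                            std::numeric_limits<int32_t>::max()};
  for (int32_t X : Probes)
    assert((X != Min) == (X > Min));
  return 0;
}
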
diff --git a/llvm/test/CodeGen/X86/vector-compare-simplify.ll b/llvm/test/CodeGen/X86/vector-compare-simplify.ll
index 718e69b9511..f1ac60134eb 100644
--- a/llvm/test/CodeGen/X86/vector-compare-simplify.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-simplify.ll
@@ -334,3 +334,14 @@ define <4 x i32> @uge_smin(<4 x i32> %x) {
   ret <4 x i32> %r
 }
 
+; Make sure we can efficiently handle ne smin by turning it into sgt.
+define <4 x i32> @ne_smin(<4 x i32> %x) {
+; CHECK-LABEL: ne_smin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    retq
+  %cmp = icmp ne <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+  %r = sext <4 x i1> %cmp to <4 x i32>
+  ret <4 x i32> %r
+}
+
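
Without this combine, the icmp ne above would lower to PCMPEQD followed by a PXOR with all-ones to invert the mask; with it, the single pcmpgtd the CHECK lines expect is enough. A standalone sketch using SSE2 intrinsics (not part of the patch) comparing the two mask computations lane by lane:

// Standalone sketch, not LLVM code: with SSE2 intrinsics, shows that the
// PCMPEQD+invert lowering of "x != INT_MIN" produces the same mask as a
// single PCMPGTD against a splat of INT_MIN.
#include <cassert>
#include <climits>
#include <emmintrin.h>

int main() {
  const __m128i Min = _mm_set1_epi32(INT_MIN);
  const __m128i AllOnes = _mm_set1_epi32(-1);
  const int Probes[] = {INT_MIN, INT_MIN + 1, -1, 0, 1, INT_MAX};
  for (int V : Probes) {
    __m128i X = _mm_set1_epi32(V);
    // SETNE without the combine: PCMPEQD, then invert with PXOR.
    __m128i NeMask = _mm_xor_si128(_mm_cmpeq_epi32(X, Min), AllOnes);
    // SETGT with the combine: one PCMPGTD.
    __m128i GtMask = _mm_cmpgt_epi32(X, Min);
    assert(_mm_movemask_epi8(NeMask) == _mm_movemask_epi8(GtMask));
  }
  return 0;
}
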