author     Sanjay Patel <spatel@rotateright.com>  2018-03-25 21:16:33 +0000
committer  Sanjay Patel <spatel@rotateright.com>  2018-03-25 21:16:33 +0000
commit     93e64dd9a1e2ed5e31881994cdde5e6ef94ddc79 (patch)
tree       5867bafd5f56667416d68c5575768f8f61f58c67 /llvm/lib
parent     68a8fbc1021cd612bb69bb6d50bcb9763ad00f00 (diff)
[PatternMatch] allow undef elements when matching vector FP +0.0
This continues the FP constant pattern matching improvements from:
https://reviews.llvm.org/rL327627
https://reviews.llvm.org/rL327339
https://reviews.llvm.org/rL327307

Several integer constant matchers also have this ability. I'm separating
matching of integer/pointer null from FP positive zero and renaming/commenting
to make the functionality clearer.

llvm-svn: 328461
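
As a quick illustration (not part of this commit), here is a minimal C++ sketch of the behavior the renamed matchers are expected to have, assuming the post-patch llvm/IR/PatternMatch.h API; the helper name is hypothetical:

// Sketch: m_Zero() is narrowed to integer/pointer null, while m_PosZeroFP()
// and m_NegZeroFP() match FP zeros and, per this change, tolerate undef
// lanes in vector constants.
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static bool posZeroVectorWithUndefLaneMatches(LLVMContext &Ctx) {
  Type *FloatTy = Type::getFloatTy(Ctx);
  Constant *Zero = ConstantFP::get(FloatTy, 0.0);
  Constant *Undef = UndefValue::get(FloatTy);
  // <float 0.0, float undef>
  Constant *Vec = ConstantVector::get({Zero, Undef});
  // Expected to hold after this patch; the defined lane is +0.0, so the
  // -0.0 matcher should not fire.
  return match(Vec, m_PosZeroFP()) && !match(Vec, m_NegZeroFP());
}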
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp               | 19
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                     |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp   |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp    |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp   |  8
7 files changed, 22 insertions, 21 deletions
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 149a1967164..fb0650a5cba 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4196,11 +4196,11 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
return C;
// fadd X, -0 ==> X
- if (match(Op1, m_NegZero()))
+ if (match(Op1, m_NegZeroFP()))
return Op0;
// fadd X, 0 ==> X, when we know X is not -0
- if (match(Op1, m_Zero()) &&
+ if (match(Op1, m_PosZeroFP()) &&
(FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
return Op0;
@@ -4228,18 +4228,19 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (Constant *C = simplifyFPBinop(Op0, Op1))
return C;
- // fsub X, 0 ==> X
- if (match(Op1, m_Zero()))
+ // fsub X, +0 ==> X
+ if (match(Op1, m_PosZeroFP()))
return Op0;
// fsub X, -0 ==> X, when we know X is not -0
- if (match(Op1, m_NegZero()) &&
+ if (match(Op1, m_NegZeroFP()) &&
(FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
return Op0;
// fsub -0.0, (fsub -0.0, X) ==> X
Value *X;
- if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
+ if (match(Op0, m_NegZeroFP()) &&
+ match(Op1, m_FSub(m_NegZeroFP(), m_Value(X))))
return X;
// fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
@@ -4358,11 +4359,11 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// The constant match may include undef elements in a vector, so return a full
// zero constant as the result.
if (FMF.noNaNs()) {
- // 0 % X -> 0
- if (match(Op0, m_Zero()))
+ // +0 % X -> 0
+ if (match(Op0, m_PosZeroFP()))
return ConstantFP::getNullValue(Op0->getType());
// -0 % X -> -0
- if (match(Op0, m_NegZero()))
+ if (match(Op0, m_NegZeroFP()))
return ConstantFP::getNegativeZero(Op0->getType());
}
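
For reference, a standalone sketch (mine, not from the patch) that mirrors the FRem hunk above: because the matcher now accepts vector constants with undef lanes, the fold materializes a fully defined zero of the matching sign instead of returning the possibly partially-undef Op0. The helper name is hypothetical:

#include "llvm/IR/Constants.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static Constant *simplifyFRemZeroNumerator(Value *Op0) {
  // frem nnan +0 (possibly with undef lanes), X --> fully defined +0
  if (match(Op0, m_PosZeroFP()))
    return ConstantFP::getNullValue(Op0->getType());
  // frem nnan -0 (possibly with undef lanes), X --> fully defined -0
  if (match(Op0, m_NegZeroFP()))
    return ConstantFP::getNegativeZero(Op0->getType());
  return nullptr;
}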
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2ec9cc11493..1a41d0cb657 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2687,7 +2687,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
return true;
// (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
- if (match(Op, m_FAdd(m_Value(), m_Zero())))
+ if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
return true;
// sitofp and uitofp turn into +0.0 for zero.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index b37b201ff25..1f04a8b3a3a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1700,7 +1700,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
// Subtraction from -0.0 is the canonical form of fneg.
// fsub nsz 0, X ==> fsub nsz -0.0, X
- if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero()))
+ if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_PosZeroFP()))
return BinaryOperator::CreateFNegFMF(Op1, &I);
if (isa<Constant>(Op0))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 16e8c4ff92c..04a85c7a3e7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1121,8 +1121,8 @@ Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
return nullptr;
// FCmp canonicalization ensures that (fcmp ord/uno X, X) and
- // (fcmp ord/uno X, C) will be transformed to (fcmp X, 0.0).
- if (match(LHS1, m_Zero()) && LHS1 == RHS1)
+ // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
+ if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
// Ignore the constants because they are obviously not NANs:
// (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
// (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
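
A short sketch (not from the patch) of why the pointer-equality check LHS1 == RHS1 is replaced with a second m_PosZeroFP() match above: two +0.0 vector constants that differ only in which lanes are undef are distinct Constant objects, yet both should be treated as the canonical zero. The helper name is hypothetical:

#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static bool distinctZerosBothMatch(LLVMContext &Ctx) {
  Type *FloatTy = Type::getFloatTy(Ctx);
  Constant *Z = ConstantFP::get(FloatTy, 0.0);
  Constant *U = UndefValue::get(FloatTy);
  Constant *A = ConstantVector::get({Z, U}); // <float 0.0, float undef>
  Constant *B = ConstantVector::get({U, Z}); // <float undef, float 0.0>
  // A and B are different Constant objects, so the old LHS1 == RHS1 test
  // would fail, but both satisfy the FP +0.0 matcher after this change.
  return A != B && match(A, m_PosZeroFP()) && match(B, m_PosZeroFP());
}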
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bdd1a58171e..f5400f978a4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2362,7 +2362,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Folding cmp(sub(a,b),0) -> cmp(a,b) and cmp(0,sub(a,b)) -> cmp(b,a)
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
- bool Arg0IsZero = match(Arg0, m_Zero());
+ bool Arg0IsZero = match(Arg0, m_PosZeroFP());
if (Arg0IsZero)
std::swap(Arg0, Arg1);
Value *A, *B;
@@ -2374,7 +2374,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// The compare intrinsic uses the above assumptions and therefore
// doesn't require additional flags.
if ((match(Arg0, m_OneUse(m_FSub(m_Value(A), m_Value(B)))) &&
- match(Arg1, m_Zero()) && isa<Instruction>(Arg0) &&
+ match(Arg1, m_PosZeroFP()) && isa<Instruction>(Arg0) &&
cast<Instruction>(Arg0)->getFastMathFlags().noInfs())) {
if (Arg0IsZero)
std::swap(A, B);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 46448aa7f8d..c0876a27594 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4942,11 +4942,11 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
// If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
// then canonicalize the operand to 0.0.
if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
- if (!match(Op0, m_Zero()) && isKnownNeverNaN(Op0)) {
+ if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0)) {
I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
return &I;
}
- if (!match(Op1, m_Zero()) && isKnownNeverNaN(Op1)) {
+ if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1)) {
I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
return &I;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index ff682acd3fd..d6d046f0185 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1579,10 +1579,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (match(FCI->getOperand(1), m_AnyZeroFP()) && FCI->hasNoNaNs()) {
// (X <= +/-0.0) ? (0.0 - X) : X --> fabs(X)
// (X > +/-0.0) ? X : (0.0 - X) --> fabs(X)
- if ((X == FalseVal && match(TrueVal, m_FSub(m_Zero(), m_Specific(X))) &&
- Pred == FCmpInst::FCMP_OLE) ||
- (X == TrueVal && match(FalseVal, m_FSub(m_Zero(), m_Specific(X))) &&
- Pred == FCmpInst::FCMP_OGT)) {
+ if ((X == FalseVal && Pred == FCmpInst::FCMP_OLE &&
+ match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(X)))) ||
+ (X == TrueVal && Pred == FCmpInst::FCMP_OGT &&
+ match(FalseVal, m_FSub(m_PosZeroFP(), m_Specific(X))))) {
Value *Fabs = Builder.CreateIntrinsic(Intrinsic::fabs, { X }, FCI);
return replaceInstUsesWith(SI, Fabs);
}