author     Sanjay Patel <spatel@rotateright.com>   2017-11-06 16:27:15 +0000
committer  Sanjay Patel <spatel@rotateright.com>   2017-11-06 16:27:15 +0000
commit     629c41153876b18e4c35b82dd7d410e35d79ea56
tree       5648dbb96d419584982a77223f7466ae7be5caf0 /llvm/lib
parent     0ed8d48c4ba133b12455ac9ed4db0f1b65e22663
[IR] redefine 'UnsafeAlgebra' / 'reassoc' fast-math-flags and add 'trans' fast-math-flag
As discussed on llvm-dev:
http://lists.llvm.org/pipermail/llvm-dev/2016-November/107104.html
and again more recently:
http://lists.llvm.org/pipermail/llvm-dev/2017-October/118118.html
...this is a step in cleaning up our fast-math-flags implementation in IR to better match
the capabilities of both clang's user-visible flags and the backend's flags for SDNode.
As proposed in the above threads, we're replacing the 'UnsafeAlgebra' bit (which had the
'umbrella' meaning that all flags are set) with a new bit that only applies to algebraic
reassociation - 'AllowReassoc'.
We're also adding a bit to allow approximations for library functions called 'ApproxFunc'
(this was initially proposed as 'libm' or similar).
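For illustration, a minimal C++ sketch of setting the two new flags independently, using the FastMathFlags API names from this patch (the helper function itself is hypothetical):

    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Hypothetical helper: grant only reassociation and libm approximation,
    // leaving nnan/ninf/nsz/arcp/contract off.
    FastMathFlags getGranularFlags() {
      FastMathFlags FMF;
      FMF.setAllowReassoc(); // prints as 'reassoc' in IR
      FMF.setApproxFunc();   // prints as 'afn' in IR
      return FMF;
    }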
...and we're out of bits. 7 bits ought to be enough for anyone, right? :) FWIW, I did
look at getting this out of SubclassOptionalData via SubclassData (a spacious 16 bits),
but that's apparently already used for other purposes. Also, I don't think we can just
add a field to FPMathOperator because Operator is not intended to be instantiated.
We'll defer movement of FMF to another day.
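For reference, the seven flags as bits in SubclassOptionalData look roughly like this; the names are from this patch, but treat the exact bit positions as an assumption:

    // Assumed bit layout (see FastMathFlags in llvm/IR/Operator.h):
    enum {
      AllowReassoc    = 1 << 0, // 'reassoc' (replaces UnsafeAlgebra)
      NoNaNs          = 1 << 1, // 'nnan'
      NoInfs          = 1 << 2, // 'ninf'
      NoSignedZeros   = 1 << 3, // 'nsz'
      AllowReciprocal = 1 << 4, // 'arcp'
      AllowContract   = 1 << 5, // 'contract'
      ApproxFunc      = 1 << 6  // 'afn' (new)
    };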
We keep the 'fast' keyword. I thought about removing that, but seeing IR like this:
%f.fast = fadd reassoc nnan ninf nsz arcp contract afn float %op1, %op2
...made me think we want to keep the shortcut synonym.
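The synonym works the same way in the C++ API: setFast() is shorthand for setting all seven flags, and isFast() reports whether every flag is on. A small sketch (the demo function is hypothetical):

    #include "llvm/IR/Operator.h"
    #include <cassert>
    using namespace llvm;

    void fastIsShorthand() {
      FastMathFlags FMF;
      FMF.setFast();        // same as setting reassoc/nnan/ninf/nsz/arcp/contract/afn
      assert(FMF.isFast()); // true only when all seven flags are set
    }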
Finally, this change is binary incompatible with existing IR as seen in the
compatibility tests. This statement:
"Newer releases can ignore features from older releases, but they cannot miscompile
them. For example, if nsw is ever replaced with something else, dropping it would be
a valid way to upgrade the IR."
( http://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility )
...provides the flexibility we want to make this change without requiring a new IR
version. I.e., we're not loosening the FP strictness of existing IR. At worst, we will
fail to optimize some previously 'fast' code because it's no longer recognized as
'fast'. This should get fixed as we audit/squash all of the uses of 'isFast()'.
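As a hypothetical example of that audit, a transform that only reorders FP operands could be relaxed from requiring all flags to testing just the bit it needs:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Hypothetical refinement: instead of bailing unless the op is fully
    // 'fast', a reassociation-only transform can check the 'reassoc' bit.
    static bool canReassociate(const Instruction &I) {
      return isa<FPMathOperator>(&I) && I.hasAllowReassoc();
    }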
Note: an inter-dependent clang commit to use the new API name should closely follow
this commit.
Differential Revision: https://reviews.llvm.org/D39304
llvm-svn: 317488
Diffstat (limited to 'llvm/lib')
19 files changed, 99 insertions, 67 deletions
diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp
index 50b391fdf73..b8b56d79c82 100644
--- a/llvm/lib/AsmParser/LLLexer.cpp
+++ b/llvm/lib/AsmParser/LLLexer.cpp
@@ -552,6 +552,8 @@ lltok::Kind LLLexer::LexIdentifier() {
   KEYWORD(nsz);
   KEYWORD(arcp);
   KEYWORD(contract);
+  KEYWORD(reassoc);
+  KEYWORD(afn);
   KEYWORD(fast);
   KEYWORD(nuw);
   KEYWORD(nsw);
diff --git a/llvm/lib/AsmParser/LLParser.h b/llvm/lib/AsmParser/LLParser.h
index dcc3f22e03b..94e4c1ae96d 100644
--- a/llvm/lib/AsmParser/LLParser.h
+++ b/llvm/lib/AsmParser/LLParser.h
@@ -193,7 +193,7 @@ namespace llvm {
       FastMathFlags FMF;
       while (true)
         switch (Lex.getKind()) {
-        case lltok::kw_fast: FMF.setUnsafeAlgebra(); Lex.Lex(); continue;
+        case lltok::kw_fast: FMF.setFast(); Lex.Lex(); continue;
        case lltok::kw_nnan: FMF.setNoNaNs(); Lex.Lex(); continue;
        case lltok::kw_ninf: FMF.setNoInfs(); Lex.Lex(); continue;
        case lltok::kw_nsz: FMF.setNoSignedZeros(); Lex.Lex(); continue;
@@ -202,6 +202,8 @@ namespace llvm {
           FMF.setAllowContract(true);
           Lex.Lex();
           continue;
+        case lltok::kw_reassoc: FMF.setAllowReassoc(); Lex.Lex(); continue;
+        case lltok::kw_afn: FMF.setApproxFunc(); Lex.Lex(); continue;
         default: return FMF;
         }
       return FMF;
diff --git a/llvm/lib/AsmParser/LLToken.h b/llvm/lib/AsmParser/LLToken.h
index db0de6c0d5a..0c5cf6b5d45 100644
--- a/llvm/lib/AsmParser/LLToken.h
+++ b/llvm/lib/AsmParser/LLToken.h
@@ -102,6 +102,8 @@ enum Kind {
   kw_nsz,
   kw_arcp,
   kw_contract,
+  kw_reassoc,
+  kw_afn,
   kw_fast,
   kw_nuw,
   kw_nsw,
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index d0f11db8f61..3e0a39c099b 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1046,8 +1046,8 @@ static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
 static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
   FastMathFlags FMF;
-  if (0 != (Val & FastMathFlags::UnsafeAlgebra))
-    FMF.setUnsafeAlgebra();
+  if (0 != (Val & FastMathFlags::AllowReassoc))
+    FMF.setAllowReassoc();
   if (0 != (Val & FastMathFlags::NoNaNs))
     FMF.setNoNaNs();
   if (0 != (Val & FastMathFlags::NoInfs))
@@ -1058,6 +1058,8 @@ static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
     FMF.setAllowReciprocal();
   if (0 != (Val & FastMathFlags::AllowContract))
     FMF.setAllowContract(true);
+  if (0 != (Val & FastMathFlags::ApproxFunc))
+    FMF.setApproxFunc();
   return FMF;
 }
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index c5d376c9426..03a77c9734e 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -1321,8 +1321,8 @@ static uint64_t getOptimizationFlags(const Value *V) {
     if (PEO->isExact())
       Flags |= 1 << bitc::PEO_EXACT;
   } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
-    if (FPMO->hasUnsafeAlgebra())
-      Flags |= FastMathFlags::UnsafeAlgebra;
+    if (FPMO->hasAllowReassoc())
+      Flags |= FastMathFlags::AllowReassoc;
     if (FPMO->hasNoNaNs())
       Flags |= FastMathFlags::NoNaNs;
     if (FPMO->hasNoInfs())
@@ -1333,6 +1333,8 @@
       Flags |= FastMathFlags::AllowReciprocal;
     if (FPMO->hasAllowContract())
       Flags |= FastMathFlags::AllowContract;
+    if (FPMO->hasApproxFunc())
+      Flags |= FastMathFlags::ApproxFunc;
   }

   return Flags;
diff --git a/llvm/lib/CodeGen/ExpandReductions.cpp b/llvm/lib/CodeGen/ExpandReductions.cpp
index 70dca3b74b2..abf487a4f19 100644
--- a/llvm/lib/CodeGen/ExpandReductions.cpp
+++ b/llvm/lib/CodeGen/ExpandReductions.cpp
@@ -95,7 +95,7 @@ bool expandReductions(Function &F, const TargetTransformInfo *TTI) {
       // and it can't be handled by generating this shuffle sequence.
       // TODO: Implement scalarization of ordered reductions here for targets
       // without native support.
-      if (!II->getFastMathFlags().unsafeAlgebra())
+      if (!II->getFastMathFlags().isFast())
         continue;
       Vec = II->getArgOperand(1);
       break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c8abe25b7c6..f45e264f6e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2585,7 +2585,7 @@ static bool isVectorReductionOp(const User *I) {
   case Instruction::FAdd:
   case Instruction::FMul:
     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
-      if (FPOp->getFastMathFlags().unsafeAlgebra())
+      if (FPOp->getFastMathFlags().isFast())
         break;
     LLVM_FALLTHROUGH;
   default:
@@ -2631,7 +2631,7 @@
       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
-          if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
+          if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
             return false;
         UsersToVisit.push_back(U);
       } else if (const ShuffleVectorInst *ShufInst =
@@ -2725,7 +2725,7 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
     Flags.setNoInfs(FMF.noInfs());
     Flags.setNoNaNs(FMF.noNaNs());
     Flags.setNoSignedZeros(FMF.noSignedZeros());
-    Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
+    Flags.setUnsafeAlgebra(FMF.isFast());

   SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
                                      Op1, Op2, Flags);
@@ -7959,13 +7959,13 @@ void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
   switch (Intrinsic) {
   case Intrinsic::experimental_vector_reduce_fadd:
-    if (FMF.unsafeAlgebra())
+    if (FMF.isFast())
       Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
     else
       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
     break;
   case Intrinsic::experimental_vector_reduce_fmul:
-    if (FMF.unsafeAlgebra())
+    if (FMF.isFast())
       Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
     else
       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 3f41a1dc066..0fafe82404e 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -1108,10 +1108,12 @@ static void writeAtomicRMWOperation(raw_ostream &Out,
 static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
   if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U)) {
-    // Unsafe algebra implies all the others, no need to write them all out
-    if (FPO->hasUnsafeAlgebra())
+    // 'Fast' is an abbreviation for all fast-math-flags.
+    if (FPO->isFast())
       Out << " fast";
     else {
+      if (FPO->hasAllowReassoc())
+        Out << " reassoc";
       if (FPO->hasNoNaNs())
         Out << " nnan";
       if (FPO->hasNoInfs())
@@ -1122,6 +1124,8 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
         Out << " arcp";
       if (FPO->hasAllowContract())
         Out << " contract";
+      if (FPO->hasApproxFunc())
+        Out << " afn";
     }
   }
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index ceb521c4c48..ffc3a30e6a1 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -146,9 +146,14 @@ bool Instruction::isExact() const {
   return cast<PossiblyExactOperator>(this)->isExact();
 }

-void Instruction::setHasUnsafeAlgebra(bool B) {
+void Instruction::setFast(bool B) {
   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
-  cast<FPMathOperator>(this)->setHasUnsafeAlgebra(B);
+  cast<FPMathOperator>(this)->setFast(B);
+}
+
+void Instruction::setHasAllowReassoc(bool B) {
+  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
 }

 void Instruction::setHasNoNaNs(bool B) {
@@ -171,6 +176,11 @@ void Instruction::setHasAllowReciprocal(bool B) {
   cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
 }

+void Instruction::setHasApproxFunc(bool B) {
+  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+  cast<FPMathOperator>(this)->setHasApproxFunc(B);
+}
+
 void Instruction::setFastMathFlags(FastMathFlags FMF) {
   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
   cast<FPMathOperator>(this)->setFastMathFlags(FMF);
@@ -181,9 +191,14 @@ void Instruction::copyFastMathFlags(FastMathFlags FMF) {
   cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
 }

-bool Instruction::hasUnsafeAlgebra() const {
+bool Instruction::isFast() const {
   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
-  return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
+  return cast<FPMathOperator>(this)->isFast();
+}
+
+bool Instruction::hasAllowReassoc() const {
+  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+  return cast<FPMathOperator>(this)->hasAllowReassoc();
 }

 bool Instruction::hasNoNaNs() const {
@@ -211,6 +226,11 @@ bool Instruction::hasAllowContract() const {
   return cast<FPMathOperator>(this)->hasAllowContract();
 }

+bool Instruction::hasApproxFunc() const {
+  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+  return cast<FPMathOperator>(this)->hasApproxFunc();
+}
+
 FastMathFlags Instruction::getFastMathFlags() const {
   assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
   return cast<FPMathOperator>(this)->getFastMathFlags();
@@ -579,7 +599,7 @@ bool Instruction::isAssociative() const {
   switch (Opcode) {
   case FMul:
   case FAdd:
-    return cast<FPMathOperator>(this)->hasUnsafeAlgebra();
+    return cast<FPMathOperator>(this)->isFast();
   default:
     return false;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 156f7bc6512..b17b6716766 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -400,7 +400,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
     return false;

   FastMathFlags FMF = FPOp->getFastMathFlags();
-  bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
+  bool UnsafeDiv = HasUnsafeFPMath || FMF.isFast() ||
                    FMF.allowReciprocal();

   // With UnsafeDiv node will be optimized to just rcp and mul.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 97cce266e8e..714c60a7446 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -487,7 +487,7 @@ bool AMDGPULibCalls::parseFunctionName(const StringRef& FMangledName,
 bool AMDGPULibCalls::isUnsafeMath(const CallInst *CI) const {
   if (auto Op = dyn_cast<FPMathOperator>(CI))
-    if (Op->hasUnsafeAlgebra())
+    if (Op->isFast())
       return true;
   const Function *F = CI->getParent()->getParent();
   Attribute Attr = F->getFnAttribute("unsafe-fp-math");
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 18b246b5d99..d28d615f47e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -482,7 +482,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
     return nullptr;

   FastMathFlags Flags;
-  Flags.setUnsafeAlgebra();
+  Flags.setFast();
   if (I0) Flags &= I->getFastMathFlags();
   if (I1) Flags &= I->getFastMathFlags();
@@ -511,7 +511,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
 }

 Value *FAddCombine::simplify(Instruction *I) {
-  assert(I->hasUnsafeAlgebra() && "Should be in unsafe mode");
+  assert(I->isFast() && "Expected 'fast' instruction");

   // Currently we are not able to handle vector type.
   if (I->getType()->isVectorTy())
@@ -1386,7 +1386,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
   if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
     return replaceInstUsesWith(I, V);

-  if (I.hasUnsafeAlgebra()) {
+  if (I.isFast()) {
     if (Value *V = FAddCombine(Builder).simplify(&I))
       return replaceInstUsesWith(I, V);
   }
@@ -1736,7 +1736,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
   if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
     return replaceInstUsesWith(I, V);

-  if (I.hasUnsafeAlgebra()) {
+  if (I.isFast()) {
     if (Value *V = FAddCombine(Builder).simplify(&I))
       return replaceInstUsesWith(I, V);
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 7a4abc9aca0..a00e6f73ab8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2017,7 +2017,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::fmuladd: {
     // Canonicalize fast fmuladd to the separate fmul + fadd.
-    if (II->hasUnsafeAlgebra()) {
+    if (II->isFast()) {
       BuilderTy::FastMathFlagGuard Guard(Builder);
       Builder.setFastMathFlags(II->getFastMathFlags());
       Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index e6b97538267..87666360c1a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -487,7 +487,7 @@ static void detectLog2OfHalf(Value *&Op, Value *&Y, IntrinsicInst *&Log2) {
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op);
   if (!II)
     return;
-  if (II->getIntrinsicID() != Intrinsic::log2 || !II->hasUnsafeAlgebra())
+  if (II->getIntrinsicID() != Intrinsic::log2 || !II->isFast())
     return;

   Log2 = II;
@@ -498,7 +498,8 @@
   Instruction *I = dyn_cast<Instruction>(OpLog2Of);
   if (!I)
     return;
-  if (I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
+
+  if (I->getOpcode() != Instruction::FMul || !I->isFast())
     return;

   if (match(I->getOperand(0), m_SpecificFP(0.5)))
@@ -601,7 +602,7 @@ Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
   }

   if (R) {
-    R->setHasUnsafeAlgebra(true);
+    R->setFast(true);
     InsertNewInstWith(R, *InsertBefore);
   }
@@ -622,7 +623,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
                                  SQ.getWithInstruction(&I)))
     return replaceInstUsesWith(I, V);

-  bool AllowReassociate = I.hasUnsafeAlgebra();
+  bool AllowReassociate = I.isFast();

   // Simplify mul instructions with a constant RHS.
   if (isa<Constant>(Op1)) {
@@ -1341,7 +1342,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
     if (Instruction *R = FoldOpIntoSelect(I, SI))
       return R;

-  bool AllowReassociate = I.hasUnsafeAlgebra();
+  bool AllowReassociate = I.isFast();
   bool AllowReciprocal = I.hasAllowReciprocal();

   if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index a44ca333fee..1f32f9f24aa 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -145,8 +145,7 @@ XorOpnd::XorOpnd(Value *V) {
 static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
   if (V->hasOneUse() && isa<Instruction>(V) &&
       cast<Instruction>(V)->getOpcode() == Opcode &&
-      (!isa<FPMathOperator>(V) ||
-       cast<Instruction>(V)->hasUnsafeAlgebra()))
+      (!isa<FPMathOperator>(V) || cast<Instruction>(V)->isFast()))
     return cast<BinaryOperator>(V);
   return nullptr;
 }
@@ -156,8 +155,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
   if (V->hasOneUse() && isa<Instruction>(V) &&
       (cast<Instruction>(V)->getOpcode() == Opcode1 ||
        cast<Instruction>(V)->getOpcode() == Opcode2) &&
-      (!isa<FPMathOperator>(V) ||
-       cast<Instruction>(V)->hasUnsafeAlgebra()))
+      (!isa<FPMathOperator>(V) || cast<Instruction>(V)->isFast()))
     return cast<BinaryOperator>(V);
   return nullptr;
 }
@@ -565,7 +563,7 @@ static bool LinearizeExprTree(BinaryOperator *I,
       assert((!isa<Instruction>(Op) ||
               cast<Instruction>(Op)->getOpcode() != Opcode ||
               (isa<FPMathOperator>(Op) &&
-               !cast<Instruction>(Op)->hasUnsafeAlgebra())) &&
+               !cast<Instruction>(Op)->isFast())) &&
              "Should have been handled above!");
       assert(Op->hasOneUse() && "Has uses outside the expression tree!");
@@ -2017,8 +2015,8 @@ void ReassociatePass::OptimizeInst(Instruction *I) {
   if (I->isCommutative())
     canonicalizeOperands(I);

-  // Don't optimize floating point instructions that don't have unsafe algebra.
-  if (I->getType()->isFPOrFPVectorTy() && !I->hasUnsafeAlgebra())
+  // Don't optimize floating-point instructions unless they are 'fast'.
+  if (I->getType()->isFPOrFPVectorTy() && !I->isFast())
     return;

   // Do not reassociate boolean (i1) expressions. We want to preserve the
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 13c0bfbcb2e..0de6924e635 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -432,7 +432,7 @@ RecurrenceDescriptor::isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
                                         InstDesc &Prev, bool HasFunNoNaNAttr) {
   bool FP = I->getType()->isFloatingPointTy();
   Instruction *UAI = Prev.getUnsafeAlgebraInst();
-  if (!UAI && FP && !I->hasUnsafeAlgebra())
+  if (!UAI && FP && !I->isFast())
     UAI = I; // Found an unsafe (unvectorizable) algebra instruction.

   switch (I->getOpcode()) {
@@ -660,11 +660,11 @@ Value *RecurrenceDescriptor::createMinMaxOp(IRBuilder<> &Builder,
     break;
   }

-  // We only match FP sequences with unsafe algebra, so we can unconditionally
+  // We only match FP sequences that are 'fast', so we can unconditionally
   // set it on any generated instructions.
   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
   FastMathFlags FMF;
-  FMF.setUnsafeAlgebra();
+  FMF.setFast();
   Builder.setFastMathFlags(FMF);

   Value *Cmp;
@@ -768,7 +768,7 @@ Value *InductionDescriptor::transform(IRBuilder<> &B, Value *Index,
     // Floating point operations had to be 'fast' to enable the induction.
     FastMathFlags Flags;
-    Flags.setUnsafeAlgebra();
+    Flags.setFast();

     Value *MulExp = B.CreateFMul(StepValue, Index);
     if (isa<Instruction>(MulExp))
@@ -1338,7 +1338,7 @@ Optional<unsigned> llvm::getLoopEstimatedTripCount(Loop *L) {
 static Value *addFastMathFlag(Value *V) {
   if (isa<FPMathOperator>(V)) {
     FastMathFlags Flags;
-    Flags.setUnsafeAlgebra();
+    Flags.setFast();
     cast<Instruction>(V)->setFastMathFlags(Flags);
   }
   return V;
@@ -1401,7 +1401,7 @@ Value *llvm::createSimpleTargetReduction(
   RD::MinMaxRecurrenceKind MinMaxKind = RD::MRK_Invalid;
   // TODO: Support creating ordered reductions.
   FastMathFlags FMFUnsafe;
-  FMFUnsafe.setUnsafeAlgebra();
+  FMFUnsafe.setFast();

   switch (Opcode) {
   case Instruction::Add:
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 33117659489..a29b83717f3 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1111,7 +1111,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
   // Example: x = 1000, y = 0.001.
   // pow(exp(x), y) = pow(inf, 0.001) = inf, whereas exp(x*y) = exp(1).
   auto *OpC = dyn_cast<CallInst>(Op1);
-  if (OpC && OpC->hasUnsafeAlgebra() && CI->hasUnsafeAlgebra()) {
+  if (OpC && OpC->isFast() && CI->isFast()) {
     LibFunc Func;
     Function *OpCCallee = OpC->getCalledFunction();
     if (OpCCallee && TLI->getLibFunc(OpCCallee->getName(), Func) &&
@@ -1136,7 +1136,7 @@
                                    LibFunc_sqrtl)) {
     // If -ffast-math:
     // pow(x, -0.5) -> 1.0 / sqrt(x)
-    if (CI->hasUnsafeAlgebra()) {
+    if (CI->isFast()) {
       IRBuilder<>::FastMathFlagGuard Guard(B);
       B.setFastMathFlags(CI->getFastMathFlags());
@@ -1157,7 +1157,7 @@
                                    LibFunc_sqrtl)) {
     // In -ffast-math, pow(x, 0.5) -> sqrt(x).
-    if (CI->hasUnsafeAlgebra()) {
+    if (CI->isFast()) {
       IRBuilder<>::FastMathFlagGuard Guard(B);
       B.setFastMathFlags(CI->getFastMathFlags());
@@ -1196,7 +1196,7 @@ Value *LibCallSimplifier::optimizePow(CallInst *CI, IRBuilder<> &B) {
     return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0), Op1, "powrecip");

   // In -ffast-math, generate repeated fmul instead of generating pow(x, n).
-  if (CI->hasUnsafeAlgebra()) {
+  if (CI->isFast()) {
     APFloat V = abs(Op2C->getValueAPF());
     // We limit to a max of 7 fmul(s). Thus max exponent is 32.
     // This transformation applies to integer exponents only.
@@ -1284,9 +1284,9 @@ Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilder<> &B) {
   IRBuilder<>::FastMathFlagGuard Guard(B);
   FastMathFlags FMF;
-  if (CI->hasUnsafeAlgebra()) {
-    // Unsafe algebra sets all fast-math-flags to true.
-    FMF.setUnsafeAlgebra();
+  if (CI->isFast()) {
+    // If the call is 'fast', then anything we create here will also be 'fast'.
+    FMF.setFast();
   } else {
     // At a minimum, no-nans-fp-math must be true.
     if (!CI->hasNoNaNs())
@@ -1317,13 +1317,13 @@ Value *LibCallSimplifier::optimizeLog(CallInst *CI, IRBuilder<> &B) {
   if (UnsafeFPShrink && hasFloatVersion(Name))
     Ret = optimizeUnaryDoubleFP(CI, B, true);

-  if (!CI->hasUnsafeAlgebra())
+  if (!CI->isFast())
     return Ret;

   Value *Op1 = CI->getArgOperand(0);
   auto *OpC = dyn_cast<CallInst>(Op1);

-  // The earlier call must also be unsafe in order to do these transforms.
-  if (!OpC || !OpC->hasUnsafeAlgebra())
+  // The earlier call must also be 'fast' in order to do these transforms.
+  if (!OpC || !OpC->isFast())
     return Ret;

   // log(pow(x,y)) -> y*log(x)
@@ -1333,7 +1333,7 @@
   IRBuilder<>::FastMathFlagGuard Guard(B);
   FastMathFlags FMF;
-  FMF.setUnsafeAlgebra();
+  FMF.setFast();
   B.setFastMathFlags(FMF);

   LibFunc Func;
@@ -1365,11 +1365,11 @@ Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilder<> &B) {
       Callee->getIntrinsicID() == Intrinsic::sqrt))
     Ret = optimizeUnaryDoubleFP(CI, B, true);

-  if (!CI->hasUnsafeAlgebra())
+  if (!CI->isFast())
     return Ret;

   Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
-  if (!I || I->getOpcode() != Instruction::FMul || !I->hasUnsafeAlgebra())
+  if (!I || I->getOpcode() != Instruction::FMul || !I->isFast())
     return Ret;

   // We're looking for a repeated factor in a multiplication tree,
@@ -1391,8 +1391,7 @@
   Value *OtherMul0, *OtherMul1;
   if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
     // Pattern: sqrt((x * y) * z)
-    if (OtherMul0 == OtherMul1 &&
-        cast<Instruction>(Op0)->hasUnsafeAlgebra()) {
+    if (OtherMul0 == OtherMul1 && cast<Instruction>(Op0)->isFast()) {
       // Matched: sqrt((x * x) * z)
       RepeatOp = OtherMul0;
       OtherOp = Op1;
@@ -1437,8 +1436,8 @@ Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilder<> &B) {
   if (!OpC)
     return Ret;

-  // Both calls must allow unsafe optimizations in order to remove them.
-  if (!CI->hasUnsafeAlgebra() || !OpC->hasUnsafeAlgebra())
+  // Both calls must be 'fast' in order to remove them.
+  if (!CI->isFast() || !OpC->isFast())
     return Ret;

   // tan(atan(x)) -> x
@@ -2167,10 +2166,10 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
   // Command-line parameter overrides instruction attribute.
   // This can't be moved to optimizeFloatingPointLibCall() because it may be
-  // used by the intrinsic optimizations.
+  // used by the intrinsic optimizations.
   if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
     UnsafeFPShrink = EnableUnsafeFPShrink;
-  else if (isa<FPMathOperator>(CI) && CI->hasUnsafeAlgebra())
+  else if (isa<FPMathOperator>(CI) && CI->isFast())
     UnsafeFPShrink = true;

   // First, check for intrinsics.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index ca2f5a178e0..ed29ca0b573 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -385,7 +385,7 @@ static unsigned getReciprocalPredBlockProb() { return 2; }
 static Value *addFastMathFlag(Value *V) {
   if (isa<FPMathOperator>(V)) {
     FastMathFlags Flags;
-    Flags.setUnsafeAlgebra();
+    Flags.setFast();
     cast<Instruction>(V)->setFastMathFlags(Flags);
   }
   return V;
@@ -2720,7 +2720,7 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
   // Floating point operations had to be 'fast' to enable the induction.
   FastMathFlags Flags;
-  Flags.setUnsafeAlgebra();
+  Flags.setFast();

   Value *MulOp = Builder.CreateFMul(Cv, Step);
   if (isa<Instruction>(MulOp))
@@ -5396,7 +5396,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
       // operations, shuffles, or casts, as they don't change precision or
       // semantics.
     } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
-               !I.hasUnsafeAlgebra()) {
+               !I.isFast()) {
       DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
       Hints->setPotentiallyUnsafe();
     }
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 5dcf5528ac9..4232252af36 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4880,7 +4880,7 @@ class HorizontalReduction {
     case RK_Min:
     case RK_Max:
       return Opcode == Instruction::ICmp ||
-             cast<Instruction>(I->getOperand(0))->hasUnsafeAlgebra();
+             cast<Instruction>(I->getOperand(0))->isFast();
     case RK_UMin:
     case RK_UMax:
       assert(Opcode == Instruction::ICmp &&
@@ -5232,7 +5232,7 @@ public:
     Value *VectorizedTree = nullptr;
     IRBuilder<> Builder(ReductionRoot);
     FastMathFlags Unsafe;
-    Unsafe.setUnsafeAlgebra();
+    Unsafe.setFast();
     Builder.setFastMathFlags(Unsafe);
     unsigned i = 0;