Diffstat (limited to 'llvm/lib/Target')
7 files changed, 18 insertions, 48 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index 3afcdfb8b93..33922fd7107 100644
--- a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -207,9 +207,7 @@ bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
 }

 static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
-  if (isa<SelectInst>(Inst) && OpIdx == 0)
-    return false;
-  return true;
+  return !(isa<SelectInst>(Inst) && OpIdx == 0);
 }

 bool
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 5b6880db849..edc5751dca3 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -946,10 +946,7 @@ bool AArch64FastISel::isValueAvailable(const Value *V) const {
     return true;

   const auto *I = cast<Instruction>(V);
-  if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
-    return true;
-
-  return false;
+  return FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB;
 }

 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 75a8b151b2d..5b6385e716c 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -130,9 +130,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
   // Note: currently hasFP() is always true for hasCalls(), but that's an
   // implementation detail of the current code, not a strict requirement,
   // so stay safe here and check both.
-  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
-    return false;
-  return true;
+  return !(MFI->hasCalls() || hasFP(MF) || NumBytes > 128);
 }

 /// hasFP - Return true if the specified function should have a dedicated frame
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 1016e14a94a..134f107698d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -328,9 +328,7 @@ static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
   // it hurts if the value is used at least twice, unless we are optimizing
   // for code size.
-  if (ForCodeSize || V.hasOneUse())
-    return true;
-  return false;
+  return ForCodeSize || V.hasOneUse();
 }

 /// SelectShiftedRegister - Select a "shifted register" operand.  If the value
@@ -797,10 +795,7 @@ bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
   if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
     return false;

-  if (isWorthFolding(N))
-    return true;
-
-  return false;
+  return isWorthFolding(N);
 }

 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a6a9694a21d..1b7365474b7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2812,9 +2812,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
     return false;

   if (getTargetMachine().Options.GuaranteedTailCallOpt) {
-    if (IsTailCallConvention(CalleeCC) && CCMatch)
-      return true;
-    return false;
+    return IsTailCallConvention(CalleeCC) && CCMatch;
   }

   // Externally-defined functions with weak linkage should not be
@@ -6974,12 +6972,10 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
   const DataLayout &DL = I->getModule()->getDataLayout();
   EVT VT = getValueType(DL, User->getOperand(0)->getType());

-  if (isFMAFasterThanFMulAndFAdd(VT) &&
-      isOperationLegalOrCustom(ISD::FMA, VT) &&
-      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath))
-    return false;
-
-  return true;
+  return !(isFMAFasterThanFMulAndFAdd(VT) &&
+           isOperationLegalOrCustom(ISD::FMA, VT) &&
+           (Options.AllowFPOpFusion == FPOpFusion::Fast ||
+            Options.UnsafeFPMath));
 }

 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
@@ -7282,9 +7278,7 @@ EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
   // Same encoding for add/sub, just flip the sign.
   Immed = std::abs(Immed);
-  if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
-    return true;
-  return false;
+  return ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
 }

 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
@@ -7341,10 +7335,8 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,

   // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2

-  if (!AM.Scale || AM.Scale == 1 ||
-      (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes))
-    return true;
-  return false;
+  return !AM.Scale || AM.Scale == 1 ||
+         (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes);
 }

 int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL,
@@ -9309,10 +9301,8 @@ bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {
   }
   case ISD::Constant:
   case ISD::TargetConstant: {
-    if (std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
-        1LL << (width - 1))
-      return true;
-    return false;
+    return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) <
+           1LL << (width - 1);
   }
   }

@@ -9922,10 +9912,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
 // return instructions to help enable tail call optimizations for this
 // instruction.
 bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
-  if (!CI->isTailCall())
-    return false;
-
-  return true;
+  return CI->isTailCall();
 }

 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
index 79c09d9f058..1214806f340 100644
--- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -285,10 +285,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,

   // Do not mess with inline asm.
   const CallInst *CI = dyn_cast<const CallInst>(Instr);
-  if (CI && isa<const InlineAsm>(CI->getCalledValue()))
-    return false;
-
-  return true;
+  return !(CI && isa<const InlineAsm>(CI->getCalledValue()));
 }

 /// Check if the given Cst should be converted into
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 32b4888f2f6..5f207c1a21a 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -190,9 +190,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
     // If it's wrong, we'll materialize the constant and still get to the
     // object; it's just suboptimal. Negative offsets use the unscaled
     // load/store instructions, which have a 9-bit signed immediate.
-    if (MFI->getLocalFrameSize() < 256)
-      return false;
-    return true;
+    return MFI->getLocalFrameSize() >= 256;
   }

   return false;