Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r-- | llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/ConstantHoisting.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp | 8
-rw-r--r-- | llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 6
-rw-r--r-- | llvm/lib/Transforms/Scalar/GVNSink.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/GuardWidening.cpp | 4
-rw-r--r-- | llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 6
-rw-r--r-- | llvm/lib/Transforms/Scalar/LICM.cpp | 8
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopPredication.cpp | 6
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp | 4
-rw-r--r-- | llvm/lib/Transforms/Scalar/LoopUnswitch.cpp | 10
-rw-r--r-- | llvm/lib/Transforms/Scalar/NewGVN.cpp | 2
-rw-r--r-- | llvm/lib/Transforms/Scalar/Reassociate.cpp | 16
-rw-r--r-- | llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 22
-rw-r--r-- | llvm/lib/Transforms/Scalar/SROA.cpp | 2
17 files changed, 52 insertions, 52 deletions
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index fa7bcec677f..0830ff5dd04 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -280,7 +280,7 @@ bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
-  // expressions). 
+  // expressions).
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 3a675b97901..55759e8b166 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -781,7 +781,7 @@ bool ConstantHoistingPass::runImpl(Function &Fn, TargetTransformInfo &TTI,
  this->TTI = &TTI;
  this->DT = &DT;
  this->BFI = BFI;
-  this->Entry = &Entry; 
+  this->Entry = &Entry;

  // Collect all constant candidates.
  collectConstantCandidates(Fn);
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index ea148b728a1..2f2d7f620a2 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -473,7 +473,7 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
    // relatively expensive analysis for constants which are obviously either
    // null or non-null to start with.
    if (Type && !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
-        !isa<Constant>(V) && 
+        !isa<Constant>(V) &&
        LVI->getPredicateAt(ICmpInst::ICMP_EQ, V,
                            ConstantPointerNull::get(Type),
                            CS.getInstruction()) == LazyValueInfo::False)
@@ -670,12 +670,12 @@ static Constant *getConstantAt(Value *V, Instruction *At, LazyValueInfo *LVI) {
  Value *Op0 = C->getOperand(0);
  Constant *Op1 = dyn_cast<Constant>(C->getOperand(1));
  if (!Op1) return nullptr;
-  
+
  LazyValueInfo::Tristate Result =
    LVI->getPredicateAt(C->getPredicate(), Op0, Op1, At);
  if (Result == LazyValueInfo::Unknown)
    return nullptr;
-  
+
  return (Result == LazyValueInfo::True) ?
    ConstantInt::getTrue(C->getContext()) :
    ConstantInt::getFalse(C->getContext());
@@ -747,7 +747,7 @@ static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
      if (auto *C = getConstantAt(RetVal, RI, LVI)) {
        ++NumReturns;
        RI->replaceUsesOfWith(RetVal, C);
-        BBChanged = true; 
+        BBChanged = true;
      }
    }
  }
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index dd1a2a6adb8..9a7405e98e7 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -188,7 +188,7 @@ static bool hasAnalyzableMemoryWrite(Instruction *I,
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {
-  
+
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index 565745d12e9..533d16e088c 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -384,7 +384,7 @@ public:
                    LoadMapAllocator>;

  LoadHTType AvailableLoads;
-  
+
  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
@@ -844,7 +844,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
    // start a scope in the current generaton which is true for all future
    // generations. Also, we dont need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
-    // store, if there was a store following invariant.start. Consider: 
+    // store, if there was a store following invariant.start. Consider:
    //
    // store 30, i8* p
    // invariant.start(p)
@@ -852,7 +852,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
-      // If there are any uses, the scope might end. 
+      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp
index 28c5940db1e..8959038de59 100644
--- a/llvm/lib/Transforms/Scalar/GVNSink.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp
@@ -568,7 +568,7 @@ public:
    ReversePostOrderTraversal<Function*> RPOT(&F);
    for (auto *N : RPOT)
      NumSunk += sinkBB(N);
-    
+
    return NumSunk > 0;
  }

diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 506b38ba251..b939ef359ad 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -389,7 +389,7 @@ GuardWideningImpl::WideningScore GuardWideningImpl::computeWideningScore(
  // case. At the moment, we really only consider the second in our heuristic
  // here. TODO: evaluate cost model for spurious deopt
  // NOTE: As written, this also lets us hoist right over another guard which
-  // is essentially just another spelling for control flow. 
+  // is essentially just another spelling for control flow.
  if (isWideningCondProfitable(getGuardCondition(DominatedGuard),
                               getGuardCondition(DominatingGuard)))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;
@@ -403,7 +403,7 @@ GuardWideningImpl::WideningScore GuardWideningImpl::computeWideningScore(
  auto MaybeHoistingOutOfIf = [&]() {
    auto *DominatingBlock = DominatingGuard->getParent();
    auto *DominatedBlock = DominatedGuard->getParent();
-    
+
    // Same Block?
    if (DominatedBlock == DominatingBlock)
      return false;
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index e2f29705f2d..c5ed6d5c1b8 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -735,7 +735,7 @@ static bool isSafeDecreasingBound(const SCEV *Start,

  assert(LatchBrExitIdx == 0 &&
         "LatchBrExitIdx should be either 0 or 1");
-        
+
  const SCEV *StepPlusOne = SE.getAddExpr(Step, SE.getOne(Step->getType()));
  unsigned BitWidth = cast<IntegerType>(BoundSCEV->getType())->getBitWidth();
  APInt Min = IsSigned ? APInt::getSignedMinValue(BitWidth) :
@@ -786,7 +786,7 @@ static bool isSafeIncreasingBound(const SCEV *Start,

  const SCEV *StepMinusOne =
    SE.getMinusSCEV(Step, SE.getOne(Step->getType()));
  unsigned BitWidth = cast<IntegerType>(BoundSCEV->getType())->getBitWidth();
-  APInt Max = IsSigned ? APInt::getSignedMaxValue(BitWidth) : 
+  APInt Max = IsSigned ? APInt::getSignedMaxValue(BitWidth) :
              APInt::getMaxValue(BitWidth);
  const SCEV *Limit = SE.getMinusSCEV(SE.getConstant(Max), StepMinusOne);
@@ -798,7 +798,7 @@ static bool isSafeIncreasingBound(const SCEV *Start,
static bool CannotBeMinInLoop(const SCEV *BoundSCEV, Loop *L,
                              ScalarEvolution &SE, bool Signed) {
  unsigned BitWidth = cast<IntegerType>(BoundSCEV->getType())->getBitWidth();
-  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) : 
+  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) :
              APInt::getMinValue(BitWidth);
  auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
  return SE.isAvailableAtLoopEntry(BoundSCEV, L) &&
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index ff66632f039..c4ea43a4324 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -455,7 +455,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,

    // Keep track of whether the prefix of instructions visited so far are such
    // that the next instruction visited is guaranteed to execute if the loop
-    // is entered. 
+    // is entered.
    bool IsMustExecute = CurLoop->getHeader() == BB;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
@@ -1186,9 +1186,9 @@ bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Object))
    // Since the alloca goes out of scope, we know the caller can't retain a
    // reference to it and be well defined. Thus, we don't need to check for
-    // capture. 
+    // capture.
    return true;
-  
+
  // For all other objects we need to know that the caller can't possibly
  // have gotten a reference to the object. There are two components of
  // that:
@@ -1282,7 +1282,7 @@ bool llvm::promoteLoopAccessesToScalars(
  // That said, we can't actually make the unwind edge explicit. Therefore,
  // we have to prove that the store is dead along the unwind edge. We do
  // this by proving that the caller can't have a reference to the object
-  // after return and thus can't possibly load from the object. 
+  // after return and thus can't possibly load from the object.
  Value *Object = GetUnderlyingObject(SomePtr, MDL);
  if (!isKnownNonEscaping(Object, TLI))
    return false;
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index d8692198f7a..653948717fb 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1573,7 +1573,7 @@ void LoopIdiomRecognize::transformLoopToCountable(
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else
-      llvm_unreachable("Unexpected opcode!"); 
+      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 561ceea1d88..cbb6594cf8f 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -74,7 +74,7 @@
// }
//
// One solution for M is M = forall X . (G(X) && B(X)) => G(X + Step)
-// 
+//
// Informal proof that the transformation above is correct:
//
// By the definition of guards we can rewrite the guard condition to:
@@ -83,7 +83,7 @@
// Let's prove that for each iteration of the loop:
//   G(0) && M => G(I)
// And the condition above can be simplified to G(Start) && M.
-// 
+//
// Induction base.
//   G(0) && M => G(0)
//
@@ -379,7 +379,7 @@ Value *LoopPredication::expandCheck(SCEVExpander &Expander,
                                    ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS, Instruction *InsertAt) {
  // TODO: we can check isLoopEntryGuardedByCond before emitting the check
-  
+
  Type *Ty = LHS->getType();
  assert(Ty == RHS->getType() && "expandCheck operands have different types?");

diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 634215c9770..e955821effa 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -888,7 +888,7 @@ bool llvm::computeUnrollCount(
      UP.Count = 0;
      return false;
    }
-    
+
    // Check if the runtime trip count is too small when profile is available.
    if (L->getHeader()->getParent()->hasProfileData()) {
      if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
@@ -897,7 +897,7 @@ bool llvm::computeUnrollCount(
      else
        UP.AllowExpensiveTripCount = true;
    }
-  } 
+  }

  // Reduce count based on the type of unrolling and the threshold values.
  UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount;
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index b1258675892..6aad077ff19 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -708,7 +708,7 @@ bool LoopUnswitch::processCurrentLoop() {
      // Unswitch only those branches that are reachable.
      if (isUnreachableDueToPreviousUnswitching(*I))
        continue;
-      
+
      // If this isn't branching on an invariant condition, we can't unswitch
      // it.
      if (BI->isConditional()) {
@@ -754,7 +754,7 @@ bool LoopUnswitch::processCurrentLoop() {
          // We are unswitching ~0 out.
          UnswitchVal = AllOne;
        } else {
-          assert(OpChain == OC_OpChainNone && 
+          assert(OpChain == OC_OpChainNone &&
                 "Expect to unswitch on trivial chain");
          // Do not process same value again and again.
          // At this point we have some cases already unswitched and
@@ -1440,11 +1440,11 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
    // This in-loop instruction has been simplified w.r.t. its context,
    // i.e. LIC != Val, make sure we propagate its replacement value to
    // all its users.
-    // 
+    //
    // We can not yet delete UI, the LIC user, yet, because that would invalidate
    // the LIC->users() iterator !. However, we can make this instruction
    // dead by replacing all its users and push it onto the worklist so that
-    // it can be properly deleted and its operands simplified. 
+    // it can be properly deleted and its operands simplified.
    UI->replaceAllUsesWith(Replacement);
  }
}
@@ -1609,7 +1609,7 @@ Value *LoopUnswitch::SimplifyInstructionWithNotEqual(Instruction *Inst,
      LLVMContext &Ctx = Inst->getContext();
      if (CI->getPredicate() == CmpInst::ICMP_EQ)
        return ConstantInt::getFalse(Ctx);
-      else 
+      else
        return ConstantInt::getTrue(Ctx);
    }
  }
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 2eb887c986b..3e47e9441d1 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -2007,7 +2007,7 @@ NewGVN::performSymbolicEvaluation(Value *V,
    case Instruction::Load:
      E = performSymbolicLoadEvaluation(I);
      break;
-    case Instruction::BitCast: 
+    case Instruction::BitCast:
      E = createExpression(I);
      break;
    case Instruction::ICmp:
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index c81ac70d99e..1df0a9c49fb 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -1179,7 +1179,7 @@ static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd,
// and both "Res" and "ConstOpnd" remain unchanged.
bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
                                     APInt &ConstOpnd, Value *&Res) {
-  // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2 
+  // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2
  //                           = ((x | c1) ^ c1) ^ (c1 ^ c2)
  //                           = (x & ~c1) ^ (c1 ^ c2)
  // It is useful only when c1 == c2.
@@ -1202,12 +1202,12 @@ bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
    RedoInsts.insert(T);
    return true;
  }
-  
+
// Helper function of OptimizeXor(). It tries to simplify
// "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a
-// symbolic value. 
-// 
-// If it was successful, true is returned, and the "R" and "C" is returned 
+// symbolic value.
+//
+// If it was successful, true is returned, and the "R" and "C" is returned
// via "Res" and "ConstOpnd", respectively (If the entire expression is
// evaluated to a constant, the Res is set to NULL); otherwise, false is
// returned, and both "Res" and "ConstOpnd" remain unchanged.
@@ -1254,7 +1254,7 @@ bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
    const APInt &C1 = Opnd1->getConstPart();
    const APInt &C2 = Opnd2->getConstPart();
    APInt C3 = C1 ^ C2;
-    
+
    // Do not increase code size
    if (!C3.isNullValue() && !C3.isAllOnesValue()) {
      int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
@@ -1290,7 +1290,7 @@ Value *ReassociatePass::OptimizeXor(Instruction *I,
                                    SmallVectorImpl<ValueEntry> &Ops) {
  if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops))
    return V;
-      
+
  if (Ops.size() == 1)
    return nullptr;

@@ -1365,7 +1365,7 @@ Value *ReassociatePass::OptimizeXor(Instruction *I,
    }

    // step 3.2: When previous and current operands share the same symbolic
-    // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd" 
+    // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd"
    if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) {
      // Remove previous operand
      PrevOpnd->Invalidate();
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 391e43f7912..0de2bc72b52 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -401,7 +401,7 @@ namespace {
/// defining value.  The 'base defining value' for 'Def' is the transitive
/// closure of this relation stopping at the first instruction which has no
/// immediate base defining value.  The b.d.v. might itself be a base pointer,
-/// but it can also be an arbitrary derived pointer. 
+/// but it can also be an arbitrary derived pointer.
struct BaseDefiningValueResult {
  /// Contains the value which is the base defining value.
  Value * const BDV;
@@ -427,13 +427,13 @@ static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'.  If Index is null, returns a BDV for the entire vector
-/// 'I'.  As an optimization, this method will try to determine when the 
+/// 'I'.  As an optimization, this method will try to determine when the
/// element is known to already be a base pointer.  If this can be established,
/// the second value in the returned pair will be true.  Note that either a
/// vector or a pointer typed value can be returned.  For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the later, the return pointer is a BDV (or possibly a base) for the
-/// particular element in 'I'. 
+/// particular element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
@@ -444,7 +444,7 @@ findBaseDefiningValueOfVector(Value *I) {
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
-    // Base of constant vector consists only of constant null pointers. 
+    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);
@@ -508,11 +508,11 @@ static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
-    // live. Besides global references, all kinds of constants (e.g. undef, 
+    // live. Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having single null base. By doing this we
-    // trying to avoid problems reporting various conflicts in a form of 
+    // trying to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

@@ -1285,14 +1285,14 @@ static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
    return Index;
  };
  Module *M = StatepointToken->getModule();
-  
+
  // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
  // element type is i8 addrspace(1)*). We originally generated unique
  // declarations for each pointer type, but this proved problematic because
  // the intrinsic mangling code is incomplete and fragile.  Since we're moving
  // towards a single unified pointer type anyways, we can just cast everything
  // to an i8* of the right address space.  A bitcast is added later to convert
-  // gc_relocate to the actual value's type. 
+  // gc_relocate to the actual value's type.
  auto getGCRelocateDecl = [&] (Type *Ty) {
    assert(isHandledGCPointerType(Ty));
    auto AS = Ty->getScalarType()->getPointerAddressSpace();
@@ -1413,7 +1413,7 @@ static StringRef getDeoptLowering(CallSite CS) {
  }
  return "live-through";
}
-  
+
static void
makeStatepointExplicitImpl(const CallSite CS, /* to replace */
                           const SmallVectorImpl<Value *> &BasePtrs,
@@ -2570,7 +2570,7 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
  }

  // Before we start introducing relocations, we want to tweak the IR a bit to
-  // avoid unfortunate code generation effects.  The main example is that we 
+  // avoid unfortunate code generation effects.  The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints.  Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation.  This is semantically correct,
@@ -2593,7 +2593,7 @@ bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
    TerminatorInst *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here.  We should be able to move
-      // most instructions without side effects or memory access. 
+      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 6c3f012c628..de16b608f75 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3730,7 +3730,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
                               PartPtrTy, BasePtr->getName() + "."),
          getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
          LI->getName());
-      PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access); 
+      PLoad->copyMetadata(*LI, LLVMContext::MD_mem_parallel_loop_access);

      // Append this load onto the list of split loads so we can find it later
      // to rewrite the stores.