-rw-r--r-- | clang/lib/CodeGen/CGCleanup.cpp       | 1144
-rw-r--r-- | clang/lib/CodeGen/CGCleanup.h         |  560
-rw-r--r-- | clang/lib/CodeGen/CGException.cpp     |  173
-rw-r--r-- | clang/lib/CodeGen/CGException.h       |  539
-rw-r--r-- | clang/lib/CodeGen/CGExprCXX.cpp       |   65
-rw-r--r-- | clang/lib/CodeGen/CGObjCGNU.cpp       |    2
-rw-r--r-- | clang/lib/CodeGen/CGObjCMac.cpp       |    2
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.cpp |  886
8 files changed, 1710 insertions(+), 1661 deletions(-)
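
For orientation before the diff body: a "cleanup" here is a callback object stored by value on the EHScopeStack. IR generation subclasses EHScopeStack::Cleanup and overrides Emit(), which PopCleanupBlock invokes when the scope is popped, along the normal edge and, if requested, along the EH edge. A minimal sketch of the pattern, assuming the templated pushCleanup<T> convenience wrapper and the NormalAndEHCleanup kind declared in CodeGenFunction.h (neither is part of this diff), plus a hypothetical function to call on scope exit:

    namespace {
      // Hypothetical cleanup: call a given function on every exit from
      // the protected scope, whether by fallthrough, goto/break, or
      // exception propagation.
      struct CallFnCleanup : EHScopeStack::Cleanup {
        llvm::Function *Fn;   // function to invoke; illustrative only
        llvm::Value *Arg;
        CallFnCleanup(llvm::Function *Fn, llvm::Value *Arg)
          : Fn(Fn), Arg(Arg) {}

        void Emit(CodeGenFunction &CGF, bool IsForEH) {
          // IsForEH distinguishes the exceptional edge from the normal one.
          CGF.Builder.CreateCall(Fn, Arg);
        }
      };
    }

    // At the point the protected resource is created (assumed wrapper):
    //   CGF.EHStack.pushCleanup<CallFnCleanup>(NormalAndEHCleanup, fn, arg);

Because PopCleanupBlock below copies the cleanup out of the stack with memcpy before running it, a Cleanup may hold only trivially copyable state; CGCleanup.h restates this requirement for everything on the scope stack.
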
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp new file mode 100644 index 00000000000..3c4d80f2360 --- /dev/null +++ b/clang/lib/CodeGen/CGCleanup.cpp @@ -0,0 +1,1144 @@ +//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains code dealing with the IR generation for cleanups +// and related information. +// +// A "cleanup" is a piece of code which needs to be executed whenever +// control transfers out of a particular scope. This can be +// conditionalized to occur only on exceptional control flow, only on +// normal control flow, or both. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CGCleanup.h" + +using namespace clang; +using namespace CodeGen; + +bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) { + if (rv.isScalar()) + return DominatingLLVMValue::needsSaving(rv.getScalarVal()); + if (rv.isAggregate()) + return DominatingLLVMValue::needsSaving(rv.getAggregateAddr()); + return true; +} + +DominatingValue<RValue>::saved_type +DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { + if (rv.isScalar()) { + llvm::Value *V = rv.getScalarVal(); + + // These automatically dominate and don't need to be saved. + if (!DominatingLLVMValue::needsSaving(V)) + return saved_type(V, ScalarLiteral); + + // Everything else needs an alloca. + llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); + CGF.Builder.CreateStore(V, addr); + return saved_type(addr, ScalarAddress); + } + + if (rv.isComplex()) { + CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); + const llvm::Type *ComplexTy = + llvm::StructType::get(CGF.getLLVMContext(), + V.first->getType(), V.second->getType(), + (void*) 0); + llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex"); + CGF.StoreComplexToAddr(V, addr, /*volatile*/ false); + return saved_type(addr, ComplexAddress); + } + + assert(rv.isAggregate()); + llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile? + if (!DominatingLLVMValue::needsSaving(V)) + return saved_type(V, AggregateLiteral); + + llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); + CGF.Builder.CreateStore(V, addr); + return saved_type(addr, AggregateAddress); +} + +/// Given a saved r-value produced by SaveRValue, perform the code +/// necessary to restore it to usability at the current insertion +/// point. +RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { + switch (K) { + case ScalarLiteral: + return RValue::get(Value); + case ScalarAddress: + return RValue::get(CGF.Builder.CreateLoad(Value)); + case AggregateLiteral: + return RValue::getAggregate(Value); + case AggregateAddress: + return RValue::getAggregate(CGF.Builder.CreateLoad(Value)); + case ComplexAddress: + return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false)); + } + + llvm_unreachable("bad saved r-value kind"); + return RValue(); +} + +/// Push an entry of the given size onto this protected-scope stack. 
+char *EHScopeStack::allocate(size_t Size) { + if (!StartOfBuffer) { + unsigned Capacity = 1024; + while (Capacity < Size) Capacity *= 2; + StartOfBuffer = new char[Capacity]; + StartOfData = EndOfBuffer = StartOfBuffer + Capacity; + } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { + unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; + unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); + + unsigned NewCapacity = CurrentCapacity; + do { + NewCapacity *= 2; + } while (NewCapacity < UsedCapacity + Size); + + char *NewStartOfBuffer = new char[NewCapacity]; + char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; + char *NewStartOfData = NewEndOfBuffer - UsedCapacity; + memcpy(NewStartOfData, StartOfData, UsedCapacity); + delete [] StartOfBuffer; + StartOfBuffer = NewStartOfBuffer; + EndOfBuffer = NewEndOfBuffer; + StartOfData = NewStartOfData; + } + + assert(StartOfBuffer + Size <= StartOfData); + StartOfData -= Size; + return StartOfData; +} + +EHScopeStack::stable_iterator +EHScopeStack::getEnclosingEHCleanup(iterator it) const { + assert(it != end()); + do { + if (isa<EHCleanupScope>(*it)) { + if (cast<EHCleanupScope>(*it).isEHCleanup()) + return stabilize(it); + return cast<EHCleanupScope>(*it).getEnclosingEHCleanup(); + } + ++it; + } while (it != end()); + return stable_end(); +} + + +void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { + assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned"); + char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); + bool IsNormalCleanup = Kind & NormalCleanup; + bool IsEHCleanup = Kind & EHCleanup; + bool IsActive = !(Kind & InactiveCleanup); + EHCleanupScope *Scope = + new (Buffer) EHCleanupScope(IsNormalCleanup, + IsEHCleanup, + IsActive, + Size, + BranchFixups.size(), + InnermostNormalCleanup, + InnermostEHCleanup); + if (IsNormalCleanup) + InnermostNormalCleanup = stable_begin(); + if (IsEHCleanup) + InnermostEHCleanup = stable_begin(); + + return Scope->getCleanupBuffer(); +} + +void EHScopeStack::popCleanup() { + assert(!empty() && "popping exception stack when not empty"); + + assert(isa<EHCleanupScope>(*begin())); + EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); + InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); + InnermostEHCleanup = Cleanup.getEnclosingEHCleanup(); + StartOfData += Cleanup.getAllocatedSize(); + + if (empty()) NextEHDestIndex = FirstEHDestIndex; + + // Destroy the cleanup. + Cleanup.~EHCleanupScope(); + + // Check whether we can shrink the branch-fixups stack. + if (!BranchFixups.empty()) { + // If we no longer have any normal cleanups, all the fixups are + // complete. + if (!hasNormalCleanups()) + BranchFixups.clear(); + + // Otherwise we can still trim out unnecessary nulls. 
+ else + popNullFixups(); + } +} + +EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) { + char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters)); + CatchDepth++; + return new (Buffer) EHFilterScope(NumFilters); +} + +void EHScopeStack::popFilter() { + assert(!empty() && "popping exception stack when not empty"); + + EHFilterScope &Filter = cast<EHFilterScope>(*begin()); + StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters()); + + if (empty()) NextEHDestIndex = FirstEHDestIndex; + + assert(CatchDepth > 0 && "mismatched filter push/pop"); + CatchDepth--; +} + +EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) { + char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers)); + CatchDepth++; + EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers); + for (unsigned I = 0; I != NumHandlers; ++I) + Scope->getHandlers()[I].Index = getNextEHDestIndex(); + return Scope; +} + +void EHScopeStack::pushTerminate() { + char *Buffer = allocate(EHTerminateScope::getSize()); + CatchDepth++; + new (Buffer) EHTerminateScope(getNextEHDestIndex()); +} + +/// Remove any 'null' fixups on the stack. However, we can't pop more +/// fixups than the fixup depth on the innermost normal cleanup, or +/// else fixups that we try to add to that cleanup will end up in the +/// wrong place. We *could* try to shrink fixup depths, but that's +/// actually a lot of work for little benefit. +void EHScopeStack::popNullFixups() { + // We expect this to only be called when there's still an innermost + // normal cleanup; otherwise there really shouldn't be any fixups. + assert(hasNormalCleanups()); + + EHScopeStack::iterator it = find(InnermostNormalCleanup); + unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); + assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); + + while (BranchFixups.size() > MinSize && + BranchFixups.back().Destination == 0) + BranchFixups.pop_back(); +} + +void CodeGenFunction::initFullExprCleanup() { + // Create a variable to decide whether the cleanup needs to be run. + llvm::AllocaInst *active + = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond"); + + // Initialize it to false at a site that's guaranteed to be run + // before each evaluation. + llvm::BasicBlock *block = OutermostConditional->getStartingBlock(); + new llvm::StoreInst(Builder.getFalse(), active, &block->back()); + + // Initialize it to true at the current location. + Builder.CreateStore(Builder.getTrue(), active); + + // Set that as the active flag in the cleanup. + EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); + assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?"); + cleanup.setActiveFlag(active); + + if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup(); + if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup(); +} + +EHScopeStack::Cleanup::~Cleanup() { + llvm_unreachable("Cleanup is indestructable"); +} + +/// All the branch fixups on the EH stack have propagated out past the +/// outermost normal cleanup; resolve them all by adding cases to the +/// given switch instruction. +static void ResolveAllBranchFixups(CodeGenFunction &CGF, + llvm::SwitchInst *Switch, + llvm::BasicBlock *CleanupEntry) { + llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded; + + for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) { + // Skip this fixup if its destination isn't set. 
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I); + if (Fixup.Destination == 0) continue; + + // If there isn't an OptimisticBranchBlock, then InitialBranch is + // still pointing directly to its destination; forward it to the + // appropriate cleanup entry. This is required in the specific + // case of + // { std::string s; goto lbl; } + // lbl: + // i.e. where there's an unresolved fixup inside a single cleanup + // entry which we're currently popping. + if (Fixup.OptimisticBranchBlock == 0) { + new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex), + CGF.getNormalCleanupDestSlot(), + Fixup.InitialBranch); + Fixup.InitialBranch->setSuccessor(0, CleanupEntry); + } + + // Don't add this case to the switch statement twice. + if (!CasesAdded.insert(Fixup.Destination)) continue; + + Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex), + Fixup.Destination); + } + + CGF.EHStack.clearFixups(); +} + +/// Transitions the terminator of the given exit-block of a cleanup to +/// be a cleanup switch. +static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, + llvm::BasicBlock *Block) { + // If it's a branch, turn it into a switch whose default + // destination is its original target. + llvm::TerminatorInst *Term = Block->getTerminator(); + assert(Term && "can't transition block without terminator"); + + if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { + assert(Br->isUnconditional()); + llvm::LoadInst *Load = + new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term); + llvm::SwitchInst *Switch = + llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); + Br->eraseFromParent(); + return Switch; + } else { + return cast<llvm::SwitchInst>(Term); + } +} + +void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) { + assert(Block && "resolving a null target block"); + if (!EHStack.getNumBranchFixups()) return; + + assert(EHStack.hasNormalCleanups() && + "branch fixups exist with no normal cleanups on stack"); + + llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks; + bool ResolvedAny = false; + + for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) { + // Skip this fixup if its destination doesn't match. + BranchFixup &Fixup = EHStack.getBranchFixup(I); + if (Fixup.Destination != Block) continue; + + Fixup.Destination = 0; + ResolvedAny = true; + + // If it doesn't have an optimistic branch block, LatestBranch is + // already pointing to the right place. + llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock; + if (!BranchBB) + continue; + + // Don't process the same optimistic branch block twice. + if (!ModifiedOptimisticBlocks.insert(BranchBB)) + continue; + + llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB); + + // Add a case to the switch. + Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block); + } + + if (ResolvedAny) + EHStack.popNullFixups(); +} + +/// Pops cleanup blocks until the given savepoint is reached. +void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) { + assert(Old.isValid()); + + while (EHStack.stable_begin() != Old) { + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); + + // As long as Old strictly encloses the scope's enclosing normal + // cleanup, we're going to emit another normal cleanup which + // fallthrough can propagate through. 
+ bool FallThroughIsBranchThrough = + Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); + + PopCleanupBlock(FallThroughIsBranchThrough); + } +} + +static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF, + EHCleanupScope &Scope) { + assert(Scope.isNormalCleanup()); + llvm::BasicBlock *Entry = Scope.getNormalBlock(); + if (!Entry) { + Entry = CGF.createBasicBlock("cleanup"); + Scope.setNormalBlock(Entry); + } + return Entry; +} + +static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF, + EHCleanupScope &Scope) { + assert(Scope.isEHCleanup()); + llvm::BasicBlock *Entry = Scope.getEHBlock(); + if (!Entry) { + Entry = CGF.createBasicBlock("eh.cleanup"); + Scope.setEHBlock(Entry); + } + return Entry; +} + +/// Attempts to reduce a cleanup's entry block to a fallthrough. This +/// is basically llvm::MergeBlockIntoPredecessor, except +/// simplified/optimized for the tighter constraints on cleanup blocks. +/// +/// Returns the new block, whatever it is. +static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF, + llvm::BasicBlock *Entry) { + llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); + if (!Pred) return Entry; + + llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); + if (!Br || Br->isConditional()) return Entry; + assert(Br->getSuccessor(0) == Entry); + + // If we were previously inserting at the end of the cleanup entry + // block, we'll need to continue inserting at the end of the + // predecessor. + bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; + assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); + + // Kill the branch. + Br->eraseFromParent(); + + // Merge the blocks. + Pred->getInstList().splice(Pred->end(), Entry->getInstList()); + + // Replace all uses of the entry with the predecessor, in case there + // are phis in the cleanup. + Entry->replaceAllUsesWith(Pred); + + // Kill the entry block. + Entry->eraseFromParent(); + + if (WasInsertBlock) + CGF.Builder.SetInsertPoint(Pred); + + return Pred; +} + +static void EmitCleanup(CodeGenFunction &CGF, + EHScopeStack::Cleanup *Fn, + bool ForEH, + llvm::Value *ActiveFlag) { + // EH cleanups always occur within a terminate scope. + if (ForEH) CGF.EHStack.pushTerminate(); + + // If there's an active flag, load it and skip the cleanup if it's + // false. + llvm::BasicBlock *ContBB = 0; + if (ActiveFlag) { + ContBB = CGF.createBasicBlock("cleanup.done"); + llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action"); + llvm::Value *IsActive + = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active"); + CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB); + CGF.EmitBlock(CleanupBB); + } + + // Ask the cleanup to emit itself. + Fn->Emit(CGF, ForEH); + assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); + + // Emit the continuation block if there was an active flag. + if (ActiveFlag) + CGF.EmitBlock(ContBB); + + // Leave the terminate scope. + if (ForEH) CGF.EHStack.popTerminate(); +} + +static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, + llvm::BasicBlock *From, + llvm::BasicBlock *To) { + // Exit is the exit block of a cleanup, so it always terminates in + // an unconditional branch or a switch. 
+ llvm::TerminatorInst *Term = Exit->getTerminator(); + + if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { + assert(Br->isUnconditional() && Br->getSuccessor(0) == From); + Br->setSuccessor(0, To); + } else { + llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term); + for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I) + if (Switch->getSuccessor(I) == From) + Switch->setSuccessor(I, To); + } +} + +/// Pops a cleanup block. If the block includes a normal cleanup, the +/// current insertion point is threaded through the cleanup, as are +/// any branch fixups on the cleanup. +void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { + assert(!EHStack.empty() && "cleanup stack is empty!"); + assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); + assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); + + // Remember activation information. + bool IsActive = Scope.isActive(); + llvm::Value *NormalActiveFlag = + Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0; + llvm::Value *EHActiveFlag = + Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0; + + // Check whether we need an EH cleanup. This is only true if we've + // generated a lazy EH cleanup block. + bool RequiresEHCleanup = Scope.hasEHBranches(); + + // Check the three conditions which might require a normal cleanup: + + // - whether there are branch fix-ups through this cleanup + unsigned FixupDepth = Scope.getFixupDepth(); + bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; + + // - whether there are branch-throughs or branch-afters + bool HasExistingBranches = Scope.hasBranches(); + + // - whether there's a fallthrough + llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); + bool HasFallthrough = (FallthroughSource != 0 && IsActive); + + // Branch-through fall-throughs leave the insertion point set to the + // end of the last cleanup, which points to the current scope. The + // rest of IR gen doesn't need to worry about this; it only happens + // during the execution of PopCleanupBlocks(). + bool HasPrebranchedFallthrough = + (FallthroughSource && FallthroughSource->getTerminator()); + + // If this is a normal cleanup, then having a prebranched + // fallthrough implies that the fallthrough source unconditionally + // jumps here. + assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || + (Scope.getNormalBlock() && + FallthroughSource->getTerminator()->getSuccessor(0) + == Scope.getNormalBlock())); + + bool RequiresNormalCleanup = false; + if (Scope.isNormalCleanup() && + (HasFixups || HasExistingBranches || HasFallthrough)) { + RequiresNormalCleanup = true; + } + + // Even if we don't need the normal cleanup, we might still have + // prebranched fallthrough to worry about. + if (Scope.isNormalCleanup() && !RequiresNormalCleanup && + HasPrebranchedFallthrough) { + assert(!IsActive); + + llvm::BasicBlock *NormalEntry = Scope.getNormalBlock(); + + // If we're branching through this cleanup, just forward the + // prebranched fallthrough to the next cleanup, leaving the insert + // point in the old block. 
+ if (FallthroughIsBranchThrough) { + EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); + llvm::BasicBlock *EnclosingEntry = + CreateNormalEntry(*this, cast<EHCleanupScope>(S)); + + ForwardPrebranchedFallthrough(FallthroughSource, + NormalEntry, EnclosingEntry); + assert(NormalEntry->use_empty() && + "uses of entry remain after forwarding?"); + delete NormalEntry; + + // Otherwise, we're branching out; just emit the next block. + } else { + EmitBlock(NormalEntry); + SimplifyCleanupEntry(*this, NormalEntry); + } + } + + // If we don't need the cleanup at all, we're done. + if (!RequiresNormalCleanup && !RequiresEHCleanup) { + EHStack.popCleanup(); // safe because there are no fixups + assert(EHStack.getNumBranchFixups() == 0 || + EHStack.hasNormalCleanups()); + return; + } + + // Copy the cleanup emission data out. Note that SmallVector + // guarantees maximal alignment for its buffer regardless of its + // type parameter. + llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer; + CleanupBuffer.reserve(Scope.getCleanupSize()); + memcpy(CleanupBuffer.data(), + Scope.getCleanupBuffer(), Scope.getCleanupSize()); + CleanupBuffer.set_size(Scope.getCleanupSize()); + EHScopeStack::Cleanup *Fn = + reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data()); + + // We want to emit the EH cleanup after the normal cleanup, but go + // ahead and do the setup for the EH cleanup while the scope is still + // alive. + llvm::BasicBlock *EHEntry = 0; + llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend; + if (RequiresEHCleanup) { + EHEntry = CreateEHEntry(*this, Scope); + + // Figure out the branch-through dest if necessary. + llvm::BasicBlock *EHBranchThroughDest = 0; + if (Scope.hasEHBranchThroughs()) { + assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end()); + EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup()); + EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S)); + } + + // If we have exactly one branch-after and no branch-throughs, we + // can dispatch it without a switch. + if (!Scope.hasEHBranchThroughs() && + Scope.getNumEHBranchAfters() == 1) { + assert(!EHBranchThroughDest); + + // TODO: remove the spurious eh.cleanup.dest stores if this edge + // never went through any switches. + llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0); + EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest)); + + // Otherwise, if we have any branch-afters, we need a switch. + } else if (Scope.getNumEHBranchAfters()) { + // The default of the switch belongs to the branch-throughs if + // they exist. + llvm::BasicBlock *Default = + (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock()); + + const unsigned SwitchCapacity = Scope.getNumEHBranchAfters(); + + llvm::LoadInst *Load = + new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest"); + llvm::SwitchInst *Switch = + llvm::SwitchInst::Create(Load, Default, SwitchCapacity); + + EHInstsToAppend.push_back(Load); + EHInstsToAppend.push_back(Switch); + + for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I) + Switch->addCase(Scope.getEHBranchAfterIndex(I), + Scope.getEHBranchAfterBlock(I)); + + // Otherwise, we have only branch-throughs; jump to the next EH + // cleanup. + } else { + assert(EHBranchThroughDest); + EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest)); + } + } + + if (!RequiresNormalCleanup) { + EHStack.popCleanup(); + } else { + // If we have a fallthrough and no other need for the cleanup, + // emit it directly. 
+ if (HasFallthrough && !HasPrebranchedFallthrough && + !HasFixups && !HasExistingBranches) { + + // Fixups can cause us to optimistically create a normal block, + // only to later have no real uses for it. Just delete it in + // this case. + // TODO: we can potentially simplify all the uses after this. + if (Scope.getNormalBlock()) { + Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock()); + delete Scope.getNormalBlock(); + } + + EHStack.popCleanup(); + + EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag); + + // Otherwise, the best approach is to thread everything through + // the cleanup block and then try to clean up after ourselves. + } else { + // Force the entry block to exist. + llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope); + + // I. Set up the fallthrough edge in. + + // If there's a fallthrough, we need to store the cleanup + // destination index. For fall-throughs this is always zero. + if (HasFallthrough) { + if (!HasPrebranchedFallthrough) + Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot()); + + // Otherwise, clear the IP if we don't have fallthrough because + // the cleanup is inactive. We don't need to save it because + // it's still just FallthroughSource. + } else if (FallthroughSource) { + assert(!IsActive && "source without fallthrough for active cleanup"); + Builder.ClearInsertionPoint(); + } + + // II. Emit the entry block. This implicitly branches to it if + // we have fallthrough. All the fixups and existing branches + // should already be branched to it. + EmitBlock(NormalEntry); + + // III. Figure out where we're going and build the cleanup + // epilogue. + + bool HasEnclosingCleanups = + (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); + + // Compute the branch-through dest if we need it: + // - if there are branch-throughs threaded through the scope + // - if fall-through is a branch-through + // - if there are fixups that will be optimistically forwarded + // to the enclosing cleanup + llvm::BasicBlock *BranchThroughDest = 0; + if (Scope.hasBranchThroughs() || + (FallthroughSource && FallthroughIsBranchThrough) || + (HasFixups && HasEnclosingCleanups)) { + assert(HasEnclosingCleanups); + EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); + BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S)); + } + + llvm::BasicBlock *FallthroughDest = 0; + llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend; + + // If there's exactly one branch-after and no other threads, + // we can route it without a switch. + if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && + Scope.getNumBranchAfters() == 1) { + assert(!BranchThroughDest || !IsActive); + + // TODO: clean up the possibly dead stores to the cleanup dest slot. + llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); + InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); + + // Build a switch-out if we need it: + // - if there are branch-afters threaded through the scope + // - if fall-through is a branch-after + // - if there are fixups that have nowhere left to go and + // so must be immediately resolved + } else if (Scope.getNumBranchAfters() || + (HasFallthrough && !FallthroughIsBranchThrough) || + (HasFixups && !HasEnclosingCleanups)) { + + llvm::BasicBlock *Default = + (BranchThroughDest ? 
BranchThroughDest : getUnreachableBlock());
+
+        // TODO: base this on the number of branch-afters and fixups
+        const unsigned SwitchCapacity = 10;
+
+        llvm::LoadInst *Load =
+          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+        llvm::SwitchInst *Switch =
+          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+        InstsToAppend.push_back(Load);
+        InstsToAppend.push_back(Switch);
+
+        // Branch-after fallthrough.
+        if (FallthroughSource && !FallthroughIsBranchThrough) {
+          FallthroughDest = createBasicBlock("cleanup.cont");
+          if (HasFallthrough)
+            Switch->addCase(Builder.getInt32(0), FallthroughDest);
+        }
+
+        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+          Switch->addCase(Scope.getBranchAfterIndex(I),
+                          Scope.getBranchAfterBlock(I));
+        }
+
+        // If there aren't any enclosing cleanups, we can resolve all
+        // the fixups now.
+        if (HasFixups && !HasEnclosingCleanups)
+          ResolveAllBranchFixups(*this, Switch, NormalEntry);
+      } else {
+        // We should always have a branch-through destination in this case.
+        assert(BranchThroughDest);
+        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+      }
+
+      // IV. Pop the cleanup and emit it.
+      EHStack.popCleanup();
+      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+      EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+
+      // Append the prepared cleanup prologue from above.
+      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+        NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+      // Optimistically hope that any fixups will continue falling through.
+      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+           I < E; ++I) {
+        BranchFixup &Fixup = EHStack.getBranchFixup(I);
+        if (!Fixup.Destination) continue;
+        if (!Fixup.OptimisticBranchBlock) {
+          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+                              getNormalCleanupDestSlot(),
+                              Fixup.InitialBranch);
+          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+        }
+        Fixup.OptimisticBranchBlock = NormalExit;
+      }
+
+      // V. Set up the fallthrough edge out.
+
+      // Case 1: a fallthrough source exists but shouldn't branch to
+      // the cleanup because the cleanup is inactive.
+      if (!HasFallthrough && FallthroughSource) {
+        assert(!IsActive);
+
+        // If we have a prebranched fallthrough, that needs to be
+        // forwarded to the right block.
+        if (HasPrebranchedFallthrough) {
+          llvm::BasicBlock *Next;
+          if (FallthroughIsBranchThrough) {
+            Next = BranchThroughDest;
+            assert(!FallthroughDest);
+          } else {
+            Next = FallthroughDest;
+          }
+
+          ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
+        }
+        Builder.SetInsertPoint(FallthroughSource);
+
+      // Case 2: a fallthrough source exists and should branch to the
+      // cleanup, but we're not supposed to branch through to the next
+      // cleanup.
+      } else if (HasFallthrough && FallthroughDest) {
+        assert(!FallthroughIsBranchThrough);
+        EmitBlock(FallthroughDest);
+
+      // Case 3: a fallthrough source exists and should branch to the
+      // cleanup and then through to the next.
+      } else if (HasFallthrough) {
+        // Everything is already set up for this.
+
+      // Case 4: no fallthrough source exists.
+      } else {
+        Builder.ClearInsertionPoint();
+      }
+
+      // VI. Assorted cleaning.
+
+      // Check whether we can merge NormalEntry into a single predecessor.
+      // This might invalidate (non-IR) pointers to NormalEntry.
+      llvm::BasicBlock *NewNormalEntry =
+        SimplifyCleanupEntry(*this, NormalEntry);
+
+      // If it did invalidate those pointers, and NormalEntry was the same
+      // as NormalExit, go back and patch up the fixups.
+      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+             I < E; ++I)
+          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+    }
+  }
+
+  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+  // Emit the EH cleanup if required.
+  if (RequiresEHCleanup) {
+    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+    EmitBlock(EHEntry);
+    EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
+
+    // Append the prepared cleanup prologue from above.
+    llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
+    for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
+      EHExit->getInstList().push_back(EHInstsToAppend[I]);
+
+    Builder.restoreIP(SavedIP);
+
+    SimplifyCleanupEntry(*this, EHEntry);
+  }
+}
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+  assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
+         && "stale jump destination");
+
+  if (!HaveInsertPoint())
+    return;
+
+  // Create the branch.
+  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+  // Calculate the innermost active normal cleanup.
+  EHScopeStack::stable_iterator
+    TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+  // If we're not in an active normal cleanup scope, or if the
+  // destination scope is within the innermost active normal cleanup
+  // scope, we don't need to worry about fixups.
+  if (TopCleanup == EHStack.stable_end() ||
+      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+    Builder.ClearInsertionPoint();
+    return;
+  }
+
+  // If we can't resolve the destination cleanup scope, just add this
+  // to the current cleanup scope as a branch fixup.
+  if (!Dest.getScopeDepth().isValid()) {
+    BranchFixup &Fixup = EHStack.addBranchFixup();
+    Fixup.Destination = Dest.getBlock();
+    Fixup.DestinationIndex = Dest.getDestIndex();
+    Fixup.InitialBranch = BI;
+    Fixup.OptimisticBranchBlock = 0;
+
+    Builder.ClearInsertionPoint();
+    return;
+  }
+
+  // Otherwise, thread through all the normal cleanups in scope.
+
+  // Store the index at the start.
+  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+  // Adjust BI to point to the first cleanup block.
+  {
+    EHCleanupScope &Scope =
+      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+  }
+
+  // Add this destination to all the scopes involved.
+  EHScopeStack::stable_iterator I = TopCleanup;
+  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+  if (E.strictlyEncloses(I)) {
+    while (true) {
+      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+      assert(Scope.isNormalCleanup());
+      I = Scope.getEnclosingNormalCleanup();
+
+      // If this is the last cleanup we're propagating through, tell it
+      // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) { + Scope.addBranchAfter(Index, Dest.getBlock()); + break; + } + + // Otherwise, tell the scope that there's a jump propoagating + // through it. If this isn't new information, all the rest of + // the work has been done before. + if (!Scope.addBranchThrough(Dest.getBlock())) + break; + } + } + + Builder.ClearInsertionPoint(); +} + +void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) { + // We should never get invalid scope depths for an UnwindDest; that + // implies that the destination wasn't set up correctly. + assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?"); + + if (!HaveInsertPoint()) + return; + + // Create the branch. + llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock()); + + // Calculate the innermost active cleanup. + EHScopeStack::stable_iterator + InnermostCleanup = EHStack.getInnermostActiveEHCleanup(); + + // If the destination is in the same EH cleanup scope as us, we + // don't need to thread through anything. + if (InnermostCleanup.encloses(Dest.getScopeDepth())) { + Builder.ClearInsertionPoint(); + return; + } + assert(InnermostCleanup != EHStack.stable_end()); + + // Store the index at the start. + llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex()); + new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI); + + // Adjust BI to point to the first cleanup block. + { + EHCleanupScope &Scope = + cast<EHCleanupScope>(*EHStack.find(InnermostCleanup)); + BI->setSuccessor(0, CreateEHEntry(*this, Scope)); + } + + // Add this destination to all the scopes involved. + for (EHScopeStack::stable_iterator + I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) { + assert(E.strictlyEncloses(I)); + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I)); + assert(Scope.isEHCleanup()); + I = Scope.getEnclosingEHCleanup(); + + // If this is the last cleanup we're propagating through, add this + // as a branch-after. + if (I == E) { + Scope.addEHBranchAfter(Index, Dest.getBlock()); + break; + } + + // Otherwise, add it as a branch-through. If this isn't new + // information, all the rest of the work has been done before. + if (!Scope.addEHBranchThrough(Dest.getBlock())) + break; + } + + Builder.ClearInsertionPoint(); +} + +static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack, + EHScopeStack::stable_iterator C) { + // If we needed a normal block for any reason, that counts. + if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock()) + return true; + + // Check whether any enclosed cleanups were needed. + for (EHScopeStack::stable_iterator + I = EHStack.getInnermostNormalCleanup(); + I != C; ) { + assert(C.strictlyEncloses(I)); + EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I)); + if (S.getNormalBlock()) return true; + I = S.getEnclosingNormalCleanup(); + } + + return false; +} + +static bool IsUsedAsEHCleanup(EHScopeStack &EHStack, + EHScopeStack::stable_iterator C) { + // If we needed an EH block for any reason, that counts. + if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock()) + return true; + + // Check whether any enclosed cleanups were needed. + for (EHScopeStack::stable_iterator + I = EHStack.getInnermostEHCleanup(); I != C; ) { + assert(C.strictlyEncloses(I)); + EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I)); + if (S.getEHBlock()) return true; + I = S.getEnclosingEHCleanup(); + } + + return false; +} + +enum ForActivation_t { + ForActivation, + ForDeactivation +}; + +/// The given cleanup block is changing activation state. 
Configure a +/// cleanup variable if necessary. +/// +/// It would be good if we had some way of determining if there were +/// extra uses *after* the change-over point. +static void SetupCleanupBlockActivation(CodeGenFunction &CGF, + EHScopeStack::stable_iterator C, + ForActivation_t Kind) { + EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C)); + + // We always need the flag if we're activating the cleanup, because + // we have to assume that the current location doesn't necessarily + // dominate all future uses of the cleanup. + bool NeedFlag = (Kind == ForActivation); + + // Calculate whether the cleanup was used: + + // - as a normal cleanup + if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) { + Scope.setTestFlagInNormalCleanup(); + NeedFlag = true; + } + + // - as an EH cleanup + if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) { + Scope.setTestFlagInEHCleanup(); + NeedFlag = true; + } + + // If it hasn't yet been used as either, we're done. + if (!NeedFlag) return; + + llvm::AllocaInst *Var = Scope.getActiveFlag(); + if (!Var) { + Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive"); + Scope.setActiveFlag(Var); + + // Initialize to true or false depending on whether it was + // active up to this point. + CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation)); + } + + CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var); +} + +/// Activate a cleanup that was created in an inactivated state. +void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) { + assert(C != EHStack.stable_end() && "activating bottom of stack?"); + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); + assert(!Scope.isActive() && "double activation"); + + SetupCleanupBlockActivation(*this, C, ForActivation); + + Scope.setActive(true); +} + +/// Deactive a cleanup that was created in an active state. +void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) { + assert(C != EHStack.stable_end() && "deactivating bottom of stack?"); + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); + assert(Scope.isActive() && "double deactivation"); + + // If it's the top of the stack, just pop it. + if (C == EHStack.stable_begin()) { + // If it's a normal cleanup, we need to pretend that the + // fallthrough is unreachable. + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + PopCleanupBlock(); + Builder.restoreIP(SavedIP); + return; + } + + // Otherwise, follow the general case. + SetupCleanupBlockActivation(*this, C, ForDeactivation); + + Scope.setActive(false); +} + +llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() { + if (!NormalCleanupDest) + NormalCleanupDest = + CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot"); + return NormalCleanupDest; +} + +llvm::Value *CodeGenFunction::getEHCleanupDestSlot() { + if (!EHCleanupDest) + EHCleanupDest = + CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot"); + return EHCleanupDest; +} diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h new file mode 100644 index 00000000000..c93ec5bb76a --- /dev/null +++ b/clang/lib/CodeGen/CGCleanup.h @@ -0,0 +1,560 @@ +//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// These classes support the generation of LLVM IR for cleanups. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGCLEANUP_H +#define CLANG_CODEGEN_CGCLEANUP_H + +/// EHScopeStack is defined in CodeGenFunction.h, but its +/// implementation is in this file and in CGCleanup.cpp. +#include "CodeGenFunction.h" + +namespace llvm { + class Value; + class BasicBlock; +} + +namespace clang { +namespace CodeGen { + +/// A protected scope for zero-cost EH handling. +class EHScope { + llvm::BasicBlock *CachedLandingPad; + + unsigned K : 2; + +protected: + enum { BitsRemaining = 30 }; + +public: + enum Kind { Cleanup, Catch, Terminate, Filter }; + + EHScope(Kind K) : CachedLandingPad(0), K(K) {} + + Kind getKind() const { return static_cast<Kind>(K); } + + llvm::BasicBlock *getCachedLandingPad() const { + return CachedLandingPad; + } + + void setCachedLandingPad(llvm::BasicBlock *Block) { + CachedLandingPad = Block; + } +}; + +/// A scope which attempts to handle some, possibly all, types of +/// exceptions. +/// +/// Objective C @finally blocks are represented using a cleanup scope +/// after the catch scope. +class EHCatchScope : public EHScope { + unsigned NumHandlers : BitsRemaining; + + // In effect, we have a flexible array member + // Handler Handlers[0]; + // But that's only standard in C99, not C++, so we have to do + // annoying pointer arithmetic instead. + +public: + struct Handler { + /// A type info value, or null (C++ null, not an LLVM null pointer) + /// for a catch-all. + llvm::Value *Type; + + /// The catch handler for this type. + llvm::BasicBlock *Block; + + /// The unwind destination index for this handler. + unsigned Index; + }; + +private: + friend class EHScopeStack; + + Handler *getHandlers() { + return reinterpret_cast<Handler*>(this+1); + } + + const Handler *getHandlers() const { + return reinterpret_cast<const Handler*>(this+1); + } + +public: + static size_t getSizeForNumHandlers(unsigned N) { + return sizeof(EHCatchScope) + N * sizeof(Handler); + } + + EHCatchScope(unsigned NumHandlers) + : EHScope(Catch), NumHandlers(NumHandlers) { + } + + unsigned getNumHandlers() const { + return NumHandlers; + } + + void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) { + setHandler(I, /*catchall*/ 0, Block); + } + + void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) { + assert(I < getNumHandlers()); + getHandlers()[I].Type = Type; + getHandlers()[I].Block = Block; + } + + const Handler &getHandler(unsigned I) const { + assert(I < getNumHandlers()); + return getHandlers()[I]; + } + + typedef const Handler *iterator; + iterator begin() const { return getHandlers(); } + iterator end() const { return getHandlers() + getNumHandlers(); } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Catch; + } +}; + +/// A cleanup scope which generates the cleanup blocks lazily. +class EHCleanupScope : public EHScope { + /// Whether this cleanup needs to be run along normal edges. + bool IsNormalCleanup : 1; + + /// Whether this cleanup needs to be run along exception edges. + bool IsEHCleanup : 1; + + /// Whether this cleanup is currently active. + bool IsActive : 1; + + /// Whether the normal cleanup should test the activation flag. + bool TestFlagInNormalCleanup : 1; + + /// Whether the EH cleanup should test the activation flag. 
+ bool TestFlagInEHCleanup : 1; + + /// The amount of extra storage needed by the Cleanup. + /// Always a multiple of the scope-stack alignment. + unsigned CleanupSize : 12; + + /// The number of fixups required by enclosing scopes (not including + /// this one). If this is the top cleanup scope, all the fixups + /// from this index onwards belong to this scope. + unsigned FixupDepth : BitsRemaining - 17; // currently 13 + + /// The nearest normal cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingNormal; + + /// The nearest EH cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingEH; + + /// The dual entry/exit block along the normal edge. This is lazily + /// created if needed before the cleanup is popped. + llvm::BasicBlock *NormalBlock; + + /// The dual entry/exit block along the EH edge. This is lazily + /// created if needed before the cleanup is popped. + llvm::BasicBlock *EHBlock; + + /// An optional i1 variable indicating whether this cleanup has been + /// activated yet. + llvm::AllocaInst *ActiveFlag; + + /// Extra information required for cleanups that have resolved + /// branches through them. This has to be allocated on the side + /// because everything on the cleanup stack has be trivially + /// movable. + struct ExtInfo { + /// The destinations of normal branch-afters and branch-throughs. + llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches; + + /// Normal branch-afters. + llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4> + BranchAfters; + + /// The destinations of EH branch-afters and branch-throughs. + /// TODO: optimize for the extremely common case of a single + /// branch-through. + llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches; + + /// EH branch-afters. + llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4> + EHBranchAfters; + }; + mutable struct ExtInfo *ExtInfo; + + struct ExtInfo &getExtInfo() { + if (!ExtInfo) ExtInfo = new struct ExtInfo(); + return *ExtInfo; + } + + const struct ExtInfo &getExtInfo() const { + if (!ExtInfo) ExtInfo = new struct ExtInfo(); + return *ExtInfo; + } + +public: + /// Gets the size required for a lazy cleanup scope with the given + /// cleanup-data requirements. 
+ static size_t getSizeForCleanupSize(size_t Size) { + return sizeof(EHCleanupScope) + Size; + } + + size_t getAllocatedSize() const { + return sizeof(EHCleanupScope) + CleanupSize; + } + + EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive, + unsigned CleanupSize, unsigned FixupDepth, + EHScopeStack::stable_iterator EnclosingNormal, + EHScopeStack::stable_iterator EnclosingEH) + : EHScope(EHScope::Cleanup), + IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive), + TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false), + CleanupSize(CleanupSize), FixupDepth(FixupDepth), + EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH), + NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0) + { + assert(this->CleanupSize == CleanupSize && "cleanup size overflow"); + } + + ~EHCleanupScope() { + delete ExtInfo; + } + + bool isNormalCleanup() const { return IsNormalCleanup; } + llvm::BasicBlock *getNormalBlock() const { return NormalBlock; } + void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; } + + bool isEHCleanup() const { return IsEHCleanup; } + llvm::BasicBlock *getEHBlock() const { return EHBlock; } + void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; } + + bool isActive() const { return IsActive; } + void setActive(bool A) { IsActive = A; } + + llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; } + void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; } + + void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; } + bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; } + + void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; } + bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; } + + unsigned getFixupDepth() const { return FixupDepth; } + EHScopeStack::stable_iterator getEnclosingNormalCleanup() const { + return EnclosingNormal; + } + EHScopeStack::stable_iterator getEnclosingEHCleanup() const { + return EnclosingEH; + } + + size_t getCleanupSize() const { return CleanupSize; } + void *getCleanupBuffer() { return this + 1; } + + EHScopeStack::Cleanup *getCleanup() { + return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer()); + } + + /// True if this cleanup scope has any branch-afters or branch-throughs. + bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); } + + /// Add a branch-after to this cleanup scope. A branch-after is a + /// branch from a point protected by this (normal) cleanup to a + /// point in the normal cleanup scope immediately containing it. + /// For example, + /// for (;;) { A a; break; } + /// contains a branch-after. + /// + /// Branch-afters each have their own destination out of the + /// cleanup, guaranteed distinct from anything else threaded through + /// it. Therefore branch-afters usually force a switch after the + /// cleanup. + void addBranchAfter(llvm::ConstantInt *Index, + llvm::BasicBlock *Block) { + struct ExtInfo &ExtInfo = getExtInfo(); + if (ExtInfo.Branches.insert(Block)) + ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index)); + } + + /// Return the number of unique branch-afters on this scope. + unsigned getNumBranchAfters() const { + return ExtInfo ? 
ExtInfo->BranchAfters.size() : 0; + } + + llvm::BasicBlock *getBranchAfterBlock(unsigned I) const { + assert(I < getNumBranchAfters()); + return ExtInfo->BranchAfters[I].first; + } + + llvm::ConstantInt *getBranchAfterIndex(unsigned I) const { + assert(I < getNumBranchAfters()); + return ExtInfo->BranchAfters[I].second; + } + + /// Add a branch-through to this cleanup scope. A branch-through is + /// a branch from a scope protected by this (normal) cleanup to an + /// enclosing scope other than the immediately-enclosing normal + /// cleanup scope. + /// + /// In the following example, the branch through B's scope is a + /// branch-through, while the branch through A's scope is a + /// branch-after: + /// for (;;) { A a; B b; break; } + /// + /// All branch-throughs have a common destination out of the + /// cleanup, one possibly shared with the fall-through. Therefore + /// branch-throughs usually don't force a switch after the cleanup. + /// + /// \return true if the branch-through was new to this scope + bool addBranchThrough(llvm::BasicBlock *Block) { + return getExtInfo().Branches.insert(Block); + } + + /// Determines if this cleanup scope has any branch throughs. + bool hasBranchThroughs() const { + if (!ExtInfo) return false; + return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size()); + } + + // Same stuff, only for EH branches instead of normal branches. + // It's quite possible that we could find a better representation + // for this. + + bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); } + void addEHBranchAfter(llvm::ConstantInt *Index, + llvm::BasicBlock *Block) { + struct ExtInfo &ExtInfo = getExtInfo(); + if (ExtInfo.EHBranches.insert(Block)) + ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index)); + } + + unsigned getNumEHBranchAfters() const { + return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0; + } + + llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const { + assert(I < getNumEHBranchAfters()); + return ExtInfo->EHBranchAfters[I].first; + } + + llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const { + assert(I < getNumEHBranchAfters()); + return ExtInfo->EHBranchAfters[I].second; + } + + bool addEHBranchThrough(llvm::BasicBlock *Block) { + return getExtInfo().EHBranches.insert(Block); + } + + bool hasEHBranchThroughs() const { + if (!ExtInfo) return false; + return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size()); + } + + static bool classof(const EHScope *Scope) { + return (Scope->getKind() == Cleanup); + } +}; + +/// An exceptions scope which filters exceptions thrown through it. +/// Only exceptions matching the filter types will be permitted to be +/// thrown. +/// +/// This is used to implement C++ exception specifications. 
+class EHFilterScope : public EHScope { + unsigned NumFilters : BitsRemaining; + + // Essentially ends in a flexible array member: + // llvm::Value *FilterTypes[0]; + + llvm::Value **getFilters() { + return reinterpret_cast<llvm::Value**>(this+1); + } + + llvm::Value * const *getFilters() const { + return reinterpret_cast<llvm::Value* const *>(this+1); + } + +public: + EHFilterScope(unsigned NumFilters) : + EHScope(Filter), NumFilters(NumFilters) {} + + static size_t getSizeForNumFilters(unsigned NumFilters) { + return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*); + } + + unsigned getNumFilters() const { return NumFilters; } + + void setFilter(unsigned I, llvm::Value *FilterValue) { + assert(I < getNumFilters()); + getFilters()[I] = FilterValue; + } + + llvm::Value *getFilter(unsigned I) const { + assert(I < getNumFilters()); + return getFilters()[I]; + } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Filter; + } +}; + +/// An exceptions scope which calls std::terminate if any exception +/// reaches it. +class EHTerminateScope : public EHScope { + unsigned DestIndex : BitsRemaining; +public: + EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {} + static size_t getSize() { return sizeof(EHTerminateScope); } + + unsigned getDestIndex() const { return DestIndex; } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Terminate; + } +}; + +/// A non-stable pointer into the scope stack. +class EHScopeStack::iterator { + char *Ptr; + + friend class EHScopeStack; + explicit iterator(char *Ptr) : Ptr(Ptr) {} + +public: + iterator() : Ptr(0) {} + + EHScope *get() const { + return reinterpret_cast<EHScope*>(Ptr); + } + + EHScope *operator->() const { return get(); } + EHScope &operator*() const { return *get(); } + + iterator &operator++() { + switch (get()->getKind()) { + case EHScope::Catch: + Ptr += EHCatchScope::getSizeForNumHandlers( + static_cast<const EHCatchScope*>(get())->getNumHandlers()); + break; + + case EHScope::Filter: + Ptr += EHFilterScope::getSizeForNumFilters( + static_cast<const EHFilterScope*>(get())->getNumFilters()); + break; + + case EHScope::Cleanup: + Ptr += static_cast<const EHCleanupScope*>(get()) + ->getAllocatedSize(); + break; + + case EHScope::Terminate: + Ptr += EHTerminateScope::getSize(); + break; + } + + return *this; + } + + iterator next() { + iterator copy = *this; + ++copy; + return copy; + } + + iterator operator++(int) { + iterator copy = *this; + operator++(); + return copy; + } + + bool encloses(iterator other) const { return Ptr >= other.Ptr; } + bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; } + + bool operator==(iterator other) const { return Ptr == other.Ptr; } + bool operator!=(iterator other) const { return Ptr != other.Ptr; } +}; + +inline EHScopeStack::iterator EHScopeStack::begin() const { + return iterator(StartOfData); +} + +inline EHScopeStack::iterator EHScopeStack::end() const { + return iterator(EndOfBuffer); +} + +inline void EHScopeStack::popCatch() { + assert(!empty() && "popping exception stack when not empty"); + + assert(isa<EHCatchScope>(*begin())); + StartOfData += EHCatchScope::getSizeForNumHandlers( + cast<EHCatchScope>(*begin()).getNumHandlers()); + + if (empty()) NextEHDestIndex = FirstEHDestIndex; + + assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); + CatchDepth--; +} + +inline void EHScopeStack::popTerminate() { + assert(!empty() && "popping exception stack when not empty"); + + 
assert(isa<EHTerminateScope>(*begin())); + StartOfData += EHTerminateScope::getSize(); + + if (empty()) NextEHDestIndex = FirstEHDestIndex; + + assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); + CatchDepth--; +} + +inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const { + assert(sp.isValid() && "finding invalid savepoint"); + assert(sp.Size <= stable_begin().Size && "finding savepoint after pop"); + return iterator(EndOfBuffer - sp.Size); +} + +inline EHScopeStack::stable_iterator +EHScopeStack::stabilize(iterator ir) const { + assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer); + return stable_iterator(EndOfBuffer - ir.Ptr); +} + +inline EHScopeStack::stable_iterator +EHScopeStack::getInnermostActiveNormalCleanup() const { + for (EHScopeStack::stable_iterator + I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) { + EHCleanupScope &S = cast<EHCleanupScope>(*find(I)); + if (S.isActive()) return I; + I = S.getEnclosingNormalCleanup(); + } + return stable_end(); +} + +inline EHScopeStack::stable_iterator +EHScopeStack::getInnermostActiveEHCleanup() const { + for (EHScopeStack::stable_iterator + I = getInnermostEHCleanup(), E = stable_end(); I != E; ) { + EHCleanupScope &S = cast<EHCleanupScope>(*find(I)); + if (S.isActive()) return I; + I = S.getEnclosingEHCleanup(); + } + return stable_end(); +} + +} +} + +#endif diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp index edfee95efcc..fe51ce67e4a 100644 --- a/clang/lib/CodeGen/CGException.cpp +++ b/clang/lib/CodeGen/CGException.cpp @@ -20,180 +20,12 @@ #include "CGObjCRuntime.h" #include "CodeGenFunction.h" #include "CGException.h" +#include "CGCleanup.h" #include "TargetInfo.h" using namespace clang; using namespace CodeGen; -/// Push an entry of the given size onto this protected-scope stack. 
-char *EHScopeStack::allocate(size_t Size) { - if (!StartOfBuffer) { - unsigned Capacity = 1024; - while (Capacity < Size) Capacity *= 2; - StartOfBuffer = new char[Capacity]; - StartOfData = EndOfBuffer = StartOfBuffer + Capacity; - } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { - unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; - unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); - - unsigned NewCapacity = CurrentCapacity; - do { - NewCapacity *= 2; - } while (NewCapacity < UsedCapacity + Size); - - char *NewStartOfBuffer = new char[NewCapacity]; - char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; - char *NewStartOfData = NewEndOfBuffer - UsedCapacity; - memcpy(NewStartOfData, StartOfData, UsedCapacity); - delete [] StartOfBuffer; - StartOfBuffer = NewStartOfBuffer; - EndOfBuffer = NewEndOfBuffer; - StartOfData = NewStartOfData; - } - - assert(StartOfBuffer + Size <= StartOfData); - StartOfData -= Size; - return StartOfData; -} - -EHScopeStack::stable_iterator -EHScopeStack::getEnclosingEHCleanup(iterator it) const { - assert(it != end()); - do { - if (isa<EHCleanupScope>(*it)) { - if (cast<EHCleanupScope>(*it).isEHCleanup()) - return stabilize(it); - return cast<EHCleanupScope>(*it).getEnclosingEHCleanup(); - } - ++it; - } while (it != end()); - return stable_end(); -} - - -void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { - assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned"); - char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); - bool IsNormalCleanup = Kind & NormalCleanup; - bool IsEHCleanup = Kind & EHCleanup; - bool IsActive = !(Kind & InactiveCleanup); - EHCleanupScope *Scope = - new (Buffer) EHCleanupScope(IsNormalCleanup, - IsEHCleanup, - IsActive, - Size, - BranchFixups.size(), - InnermostNormalCleanup, - InnermostEHCleanup); - if (IsNormalCleanup) - InnermostNormalCleanup = stable_begin(); - if (IsEHCleanup) - InnermostEHCleanup = stable_begin(); - - return Scope->getCleanupBuffer(); -} - -void EHScopeStack::popCleanup() { - assert(!empty() && "popping exception stack when not empty"); - - assert(isa<EHCleanupScope>(*begin())); - EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); - InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); - InnermostEHCleanup = Cleanup.getEnclosingEHCleanup(); - StartOfData += Cleanup.getAllocatedSize(); - - if (empty()) NextEHDestIndex = FirstEHDestIndex; - - // Destroy the cleanup. - Cleanup.~EHCleanupScope(); - - // Check whether we can shrink the branch-fixups stack. - if (!BranchFixups.empty()) { - // If we no longer have any normal cleanups, all the fixups are - // complete. - if (!hasNormalCleanups()) - BranchFixups.clear(); - - // Otherwise we can still trim out unnecessary nulls. 
- else - popNullFixups(); - } -} - -EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) { - char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters)); - CatchDepth++; - return new (Buffer) EHFilterScope(NumFilters); -} - -void EHScopeStack::popFilter() { - assert(!empty() && "popping exception stack when not empty"); - - EHFilterScope &Filter = cast<EHFilterScope>(*begin()); - StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters()); - - if (empty()) NextEHDestIndex = FirstEHDestIndex; - - assert(CatchDepth > 0 && "mismatched filter push/pop"); - CatchDepth--; -} - -EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) { - char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers)); - CatchDepth++; - EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers); - for (unsigned I = 0; I != NumHandlers; ++I) - Scope->getHandlers()[I].Index = getNextEHDestIndex(); - return Scope; -} - -void EHScopeStack::pushTerminate() { - char *Buffer = allocate(EHTerminateScope::getSize()); - CatchDepth++; - new (Buffer) EHTerminateScope(getNextEHDestIndex()); -} - -/// Remove any 'null' fixups on the stack. However, we can't pop more -/// fixups than the fixup depth on the innermost normal cleanup, or -/// else fixups that we try to add to that cleanup will end up in the -/// wrong place. We *could* try to shrink fixup depths, but that's -/// actually a lot of work for little benefit. -void EHScopeStack::popNullFixups() { - // We expect this to only be called when there's still an innermost - // normal cleanup; otherwise there really shouldn't be any fixups. - assert(hasNormalCleanups()); - - EHScopeStack::iterator it = find(InnermostNormalCleanup); - unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); - assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); - - while (BranchFixups.size() > MinSize && - BranchFixups.back().Destination == 0) - BranchFixups.pop_back(); -} - -void CodeGenFunction::initFullExprCleanup() { - // Create a variable to decide whether the cleanup needs to be run. - llvm::AllocaInst *active - = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond"); - - // Initialize it to false at a site that's guaranteed to be run - // before each evaluation. - llvm::BasicBlock *block = OutermostConditional->getStartingBlock(); - new llvm::StoreInst(Builder.getFalse(), active, &block->back()); - - // Initialize it to true at the current location. - Builder.CreateStore(Builder.getTrue(), active); - - // Set that as the active flag in the cleanup. 
- EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); - assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?"); - cleanup.setActiveFlag(active); - - if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup(); - if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup(); -} - static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) { // void *__cxa_allocate_exception(size_t thrown_size); const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); @@ -1624,6 +1456,3 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() { return RethrowBlock; } -EHScopeStack::Cleanup::~Cleanup() { - llvm_unreachable("Cleanup is indestructable"); -} diff --git a/clang/lib/CodeGen/CGException.h b/clang/lib/CodeGen/CGException.h index 5e6f1511aec..1f9b8964dca 100644 --- a/clang/lib/CodeGen/CGException.h +++ b/clang/lib/CodeGen/CGException.h @@ -15,16 +15,11 @@ #ifndef CLANG_CODEGEN_CGEXCEPTION_H #define CLANG_CODEGEN_CGEXCEPTION_H -/// EHScopeStack is defined in CodeGenFunction.h, but its -/// implementation is in this file and in CGException.cpp. -#include "CodeGenFunction.h" - -namespace llvm { - class Value; - class BasicBlock; -} +#include "llvm/ADT/StringRef.h" namespace clang { +class LangOptions; + namespace CodeGen { /// The exceptions personality for a function. When @@ -54,534 +49,6 @@ public: llvm::StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; } }; -/// A protected scope for zero-cost EH handling. -class EHScope { - llvm::BasicBlock *CachedLandingPad; - - unsigned K : 2; - -protected: - enum { BitsRemaining = 30 }; - -public: - enum Kind { Cleanup, Catch, Terminate, Filter }; - - EHScope(Kind K) : CachedLandingPad(0), K(K) {} - - Kind getKind() const { return static_cast<Kind>(K); } - - llvm::BasicBlock *getCachedLandingPad() const { - return CachedLandingPad; - } - - void setCachedLandingPad(llvm::BasicBlock *Block) { - CachedLandingPad = Block; - } -}; - -/// A scope which attempts to handle some, possibly all, types of -/// exceptions. -/// -/// Objective C @finally blocks are represented using a cleanup scope -/// after the catch scope. -class EHCatchScope : public EHScope { - unsigned NumHandlers : BitsRemaining; - - // In effect, we have a flexible array member - // Handler Handlers[0]; - // But that's only standard in C99, not C++, so we have to do - // annoying pointer arithmetic instead. - -public: - struct Handler { - /// A type info value, or null (C++ null, not an LLVM null pointer) - /// for a catch-all. - llvm::Value *Type; - - /// The catch handler for this type. - llvm::BasicBlock *Block; - - /// The unwind destination index for this handler. 
- unsigned Index;
- };
-
-private:
- friend class EHScopeStack;
-
- Handler *getHandlers() {
- return reinterpret_cast<Handler*>(this+1);
- }
-
- const Handler *getHandlers() const {
- return reinterpret_cast<const Handler*>(this+1);
- }
-
-public:
- static size_t getSizeForNumHandlers(unsigned N) {
- return sizeof(EHCatchScope) + N * sizeof(Handler);
- }
-
- EHCatchScope(unsigned NumHandlers)
- : EHScope(Catch), NumHandlers(NumHandlers) {
- }
-
- unsigned getNumHandlers() const {
- return NumHandlers;
- }
-
- void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
- setHandler(I, /*catchall*/ 0, Block);
- }
-
- void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
- assert(I < getNumHandlers());
- getHandlers()[I].Type = Type;
- getHandlers()[I].Block = Block;
- }
-
- const Handler &getHandler(unsigned I) const {
- assert(I < getNumHandlers());
- return getHandlers()[I];
- }
-
- typedef const Handler *iterator;
- iterator begin() const { return getHandlers(); }
- iterator end() const { return getHandlers() + getNumHandlers(); }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Catch;
- }
-};
-
-/// A cleanup scope which generates the cleanup blocks lazily.
-class EHCleanupScope : public EHScope {
- /// Whether this cleanup needs to be run along normal edges.
- bool IsNormalCleanup : 1;
-
- /// Whether this cleanup needs to be run along exception edges.
- bool IsEHCleanup : 1;
-
- /// Whether this cleanup is currently active.
- bool IsActive : 1;
-
- /// Whether the normal cleanup should test the activation flag.
- bool TestFlagInNormalCleanup : 1;
-
- /// Whether the EH cleanup should test the activation flag.
- bool TestFlagInEHCleanup : 1;
-
- /// The amount of extra storage needed by the Cleanup.
- /// Always a multiple of the scope-stack alignment.
- unsigned CleanupSize : 12;
-
- /// The number of fixups required by enclosing scopes (not including
- /// this one). If this is the top cleanup scope, all the fixups
- /// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining - 17; // currently 13
-
- /// The nearest normal cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingNormal;
-
- /// The nearest EH cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingEH;
-
- /// The dual entry/exit block along the normal edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *NormalBlock;
-
- /// The dual entry/exit block along the EH edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *EHBlock;
-
- /// An optional i1 variable indicating whether this cleanup has been
- /// activated yet.
- llvm::AllocaInst *ActiveFlag;
-
- /// Extra information required for cleanups that have resolved
- /// branches through them. This has to be allocated on the side
- /// because everything on the cleanup stack has to be trivially
- /// movable.
- struct ExtInfo {
- /// The destinations of normal branch-afters and branch-throughs.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
-
- /// Normal branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- BranchAfters;
-
- /// The destinations of EH branch-afters and branch-throughs.
- /// TODO: optimize for the extremely common case of a single
- /// branch-through.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
-
- /// EH branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4> - EHBranchAfters; - }; - mutable struct ExtInfo *ExtInfo; - - struct ExtInfo &getExtInfo() { - if (!ExtInfo) ExtInfo = new struct ExtInfo(); - return *ExtInfo; - } - - const struct ExtInfo &getExtInfo() const { - if (!ExtInfo) ExtInfo = new struct ExtInfo(); - return *ExtInfo; - } - -public: - /// Gets the size required for a lazy cleanup scope with the given - /// cleanup-data requirements. - static size_t getSizeForCleanupSize(size_t Size) { - return sizeof(EHCleanupScope) + Size; - } - - size_t getAllocatedSize() const { - return sizeof(EHCleanupScope) + CleanupSize; - } - - EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive, - unsigned CleanupSize, unsigned FixupDepth, - EHScopeStack::stable_iterator EnclosingNormal, - EHScopeStack::stable_iterator EnclosingEH) - : EHScope(EHScope::Cleanup), - IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive), - TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false), - CleanupSize(CleanupSize), FixupDepth(FixupDepth), - EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH), - NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0) - { - assert(this->CleanupSize == CleanupSize && "cleanup size overflow"); - } - - ~EHCleanupScope() { - delete ExtInfo; - } - - bool isNormalCleanup() const { return IsNormalCleanup; } - llvm::BasicBlock *getNormalBlock() const { return NormalBlock; } - void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; } - - bool isEHCleanup() const { return IsEHCleanup; } - llvm::BasicBlock *getEHBlock() const { return EHBlock; } - void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; } - - bool isActive() const { return IsActive; } - void setActive(bool A) { IsActive = A; } - - llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; } - void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; } - - void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; } - bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; } - - void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; } - bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; } - - unsigned getFixupDepth() const { return FixupDepth; } - EHScopeStack::stable_iterator getEnclosingNormalCleanup() const { - return EnclosingNormal; - } - EHScopeStack::stable_iterator getEnclosingEHCleanup() const { - return EnclosingEH; - } - - size_t getCleanupSize() const { return CleanupSize; } - void *getCleanupBuffer() { return this + 1; } - - EHScopeStack::Cleanup *getCleanup() { - return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer()); - } - - /// True if this cleanup scope has any branch-afters or branch-throughs. - bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); } - - /// Add a branch-after to this cleanup scope. A branch-after is a - /// branch from a point protected by this (normal) cleanup to a - /// point in the normal cleanup scope immediately containing it. - /// For example, - /// for (;;) { A a; break; } - /// contains a branch-after. - /// - /// Branch-afters each have their own destination out of the - /// cleanup, guaranteed distinct from anything else threaded through - /// it. Therefore branch-afters usually force a switch after the - /// cleanup. 
- void addBranchAfter(llvm::ConstantInt *Index, - llvm::BasicBlock *Block) { - struct ExtInfo &ExtInfo = getExtInfo(); - if (ExtInfo.Branches.insert(Block)) - ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index)); - } - - /// Return the number of unique branch-afters on this scope. - unsigned getNumBranchAfters() const { - return ExtInfo ? ExtInfo->BranchAfters.size() : 0; - } - - llvm::BasicBlock *getBranchAfterBlock(unsigned I) const { - assert(I < getNumBranchAfters()); - return ExtInfo->BranchAfters[I].first; - } - - llvm::ConstantInt *getBranchAfterIndex(unsigned I) const { - assert(I < getNumBranchAfters()); - return ExtInfo->BranchAfters[I].second; - } - - /// Add a branch-through to this cleanup scope. A branch-through is - /// a branch from a scope protected by this (normal) cleanup to an - /// enclosing scope other than the immediately-enclosing normal - /// cleanup scope. - /// - /// In the following example, the branch through B's scope is a - /// branch-through, while the branch through A's scope is a - /// branch-after: - /// for (;;) { A a; B b; break; } - /// - /// All branch-throughs have a common destination out of the - /// cleanup, one possibly shared with the fall-through. Therefore - /// branch-throughs usually don't force a switch after the cleanup. - /// - /// \return true if the branch-through was new to this scope - bool addBranchThrough(llvm::BasicBlock *Block) { - return getExtInfo().Branches.insert(Block); - } - - /// Determines if this cleanup scope has any branch throughs. - bool hasBranchThroughs() const { - if (!ExtInfo) return false; - return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size()); - } - - // Same stuff, only for EH branches instead of normal branches. - // It's quite possible that we could find a better representation - // for this. - - bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); } - void addEHBranchAfter(llvm::ConstantInt *Index, - llvm::BasicBlock *Block) { - struct ExtInfo &ExtInfo = getExtInfo(); - if (ExtInfo.EHBranches.insert(Block)) - ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index)); - } - - unsigned getNumEHBranchAfters() const { - return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0; - } - - llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const { - assert(I < getNumEHBranchAfters()); - return ExtInfo->EHBranchAfters[I].first; - } - - llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const { - assert(I < getNumEHBranchAfters()); - return ExtInfo->EHBranchAfters[I].second; - } - - bool addEHBranchThrough(llvm::BasicBlock *Block) { - return getExtInfo().EHBranches.insert(Block); - } - - bool hasEHBranchThroughs() const { - if (!ExtInfo) return false; - return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size()); - } - - static bool classof(const EHScope *Scope) { - return (Scope->getKind() == Cleanup); - } -}; - -/// An exceptions scope which filters exceptions thrown through it. -/// Only exceptions matching the filter types will be permitted to be -/// thrown. -/// -/// This is used to implement C++ exception specifications. 
-class EHFilterScope : public EHScope { - unsigned NumFilters : BitsRemaining; - - // Essentially ends in a flexible array member: - // llvm::Value *FilterTypes[0]; - - llvm::Value **getFilters() { - return reinterpret_cast<llvm::Value**>(this+1); - } - - llvm::Value * const *getFilters() const { - return reinterpret_cast<llvm::Value* const *>(this+1); - } - -public: - EHFilterScope(unsigned NumFilters) : - EHScope(Filter), NumFilters(NumFilters) {} - - static size_t getSizeForNumFilters(unsigned NumFilters) { - return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*); - } - - unsigned getNumFilters() const { return NumFilters; } - - void setFilter(unsigned I, llvm::Value *FilterValue) { - assert(I < getNumFilters()); - getFilters()[I] = FilterValue; - } - - llvm::Value *getFilter(unsigned I) const { - assert(I < getNumFilters()); - return getFilters()[I]; - } - - static bool classof(const EHScope *Scope) { - return Scope->getKind() == Filter; - } -}; - -/// An exceptions scope which calls std::terminate if any exception -/// reaches it. -class EHTerminateScope : public EHScope { - unsigned DestIndex : BitsRemaining; -public: - EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {} - static size_t getSize() { return sizeof(EHTerminateScope); } - - unsigned getDestIndex() const { return DestIndex; } - - static bool classof(const EHScope *Scope) { - return Scope->getKind() == Terminate; - } -}; - -/// A non-stable pointer into the scope stack. -class EHScopeStack::iterator { - char *Ptr; - - friend class EHScopeStack; - explicit iterator(char *Ptr) : Ptr(Ptr) {} - -public: - iterator() : Ptr(0) {} - - EHScope *get() const { - return reinterpret_cast<EHScope*>(Ptr); - } - - EHScope *operator->() const { return get(); } - EHScope &operator*() const { return *get(); } - - iterator &operator++() { - switch (get()->getKind()) { - case EHScope::Catch: - Ptr += EHCatchScope::getSizeForNumHandlers( - static_cast<const EHCatchScope*>(get())->getNumHandlers()); - break; - - case EHScope::Filter: - Ptr += EHFilterScope::getSizeForNumFilters( - static_cast<const EHFilterScope*>(get())->getNumFilters()); - break; - - case EHScope::Cleanup: - Ptr += static_cast<const EHCleanupScope*>(get()) - ->getAllocatedSize(); - break; - - case EHScope::Terminate: - Ptr += EHTerminateScope::getSize(); - break; - } - - return *this; - } - - iterator next() { - iterator copy = *this; - ++copy; - return copy; - } - - iterator operator++(int) { - iterator copy = *this; - operator++(); - return copy; - } - - bool encloses(iterator other) const { return Ptr >= other.Ptr; } - bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; } - - bool operator==(iterator other) const { return Ptr == other.Ptr; } - bool operator!=(iterator other) const { return Ptr != other.Ptr; } -}; - -inline EHScopeStack::iterator EHScopeStack::begin() const { - return iterator(StartOfData); -} - -inline EHScopeStack::iterator EHScopeStack::end() const { - return iterator(EndOfBuffer); -} - -inline void EHScopeStack::popCatch() { - assert(!empty() && "popping exception stack when not empty"); - - assert(isa<EHCatchScope>(*begin())); - StartOfData += EHCatchScope::getSizeForNumHandlers( - cast<EHCatchScope>(*begin()).getNumHandlers()); - - if (empty()) NextEHDestIndex = FirstEHDestIndex; - - assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); - CatchDepth--; -} - -inline void EHScopeStack::popTerminate() { - assert(!empty() && "popping exception stack when not empty"); - - 
assert(isa<EHTerminateScope>(*begin())); - StartOfData += EHTerminateScope::getSize(); - - if (empty()) NextEHDestIndex = FirstEHDestIndex; - - assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); - CatchDepth--; -} - -inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const { - assert(sp.isValid() && "finding invalid savepoint"); - assert(sp.Size <= stable_begin().Size && "finding savepoint after pop"); - return iterator(EndOfBuffer - sp.Size); -} - -inline EHScopeStack::stable_iterator -EHScopeStack::stabilize(iterator ir) const { - assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer); - return stable_iterator(EndOfBuffer - ir.Ptr); -} - -inline EHScopeStack::stable_iterator -EHScopeStack::getInnermostActiveNormalCleanup() const { - for (EHScopeStack::stable_iterator - I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) { - EHCleanupScope &S = cast<EHCleanupScope>(*find(I)); - if (S.isActive()) return I; - I = S.getEnclosingNormalCleanup(); - } - return stable_end(); -} - -inline EHScopeStack::stable_iterator -EHScopeStack::getInnermostActiveEHCleanup() const { - for (EHScopeStack::stable_iterator - I = getInnermostEHCleanup(), E = stable_end(); I != E; ) { - EHCleanupScope &S = cast<EHCleanupScope>(*find(I)); - if (S.isActive()) return I; - I = S.getEnclosingEHCleanup(); - } - return stable_end(); -} - } } diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index 1fab8d40641..513d80be0b1 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -721,71 +721,6 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, StoreAnyExprIntoOneUnit(CGF, E, NewPtr); } -bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) { - if (rv.isScalar()) - return DominatingLLVMValue::needsSaving(rv.getScalarVal()); - if (rv.isAggregate()) - return DominatingLLVMValue::needsSaving(rv.getAggregateAddr()); - return true; -} - -DominatingValue<RValue>::saved_type -DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { - if (rv.isScalar()) { - llvm::Value *V = rv.getScalarVal(); - - // These automatically dominate and don't need to be saved. - if (!DominatingLLVMValue::needsSaving(V)) - return saved_type(V, ScalarLiteral); - - // Everything else needs an alloca. - llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); - CGF.Builder.CreateStore(V, addr); - return saved_type(addr, ScalarAddress); - } - - if (rv.isComplex()) { - CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); - const llvm::Type *ComplexTy = - llvm::StructType::get(CGF.getLLVMContext(), - V.first->getType(), V.second->getType(), - (void*) 0); - llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex"); - CGF.StoreComplexToAddr(V, addr, /*volatile*/ false); - return saved_type(addr, ComplexAddress); - } - - assert(rv.isAggregate()); - llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile? - if (!DominatingLLVMValue::needsSaving(V)) - return saved_type(V, AggregateLiteral); - - llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); - CGF.Builder.CreateStore(V, addr); - return saved_type(addr, AggregateAddress); -} - -/// Given a saved r-value produced by SaveRValue, perform the code -/// necessary to restore it to usability at the current insertion -/// point. 
-RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { - switch (K) { - case ScalarLiteral: - return RValue::get(Value); - case ScalarAddress: - return RValue::get(CGF.Builder.CreateLoad(Value)); - case AggregateLiteral: - return RValue::getAggregate(Value); - case AggregateAddress: - return RValue::getAggregate(CGF.Builder.CreateLoad(Value)); - case ComplexAddress: - return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false)); - } - - llvm_unreachable("bad saved r-value kind"); - return RValue(); -} - namespace { /// A cleanup to call the given 'operator delete' function upon /// abnormal exit from a new expression. diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp index 6f8bdc1060e..0c8f5be7e08 100644 --- a/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -17,7 +17,7 @@ #include "CGObjCRuntime.h" #include "CodeGenModule.h" #include "CodeGenFunction.h" -#include "CGException.h" +#include "CGCleanup.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 2a27e919250..24c080e517b 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -16,7 +16,7 @@ #include "CGRecordLayout.h" #include "CodeGenModule.h" #include "CodeGenFunction.h" -#include "CGException.h" +#include "CGCleanup.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 3923816a18e..2a1024d2e82 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -28,10 +28,6 @@ using namespace clang; using namespace CodeGen; -static void ResolveAllBranchFixups(CodeGenFunction &CGF, - llvm::SwitchInst *Switch, - llvm::BasicBlock *CleanupEntry); - CodeGenFunction::CodeGenFunction(CodeGenModule &cgm) : BlockFunction(cgm, *this, Builder), CGM(cgm), Target(CGM.getContext().Target), @@ -692,888 +688,6 @@ llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) { return EmitLValue(E).getAddress(); } -/// Pops cleanup blocks until the given savepoint is reached. -void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) { - assert(Old.isValid()); - - while (EHStack.stable_begin() != Old) { - EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); - - // As long as Old strictly encloses the scope's enclosing normal - // cleanup, we're going to emit another normal cleanup which - // fallthrough can propagate through. - bool FallThroughIsBranchThrough = - Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); - - PopCleanupBlock(FallThroughIsBranchThrough); - } -} - -static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF, - EHCleanupScope &Scope) { - assert(Scope.isNormalCleanup()); - llvm::BasicBlock *Entry = Scope.getNormalBlock(); - if (!Entry) { - Entry = CGF.createBasicBlock("cleanup"); - Scope.setNormalBlock(Entry); - } - return Entry; -} - -static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF, - EHCleanupScope &Scope) { - assert(Scope.isEHCleanup()); - llvm::BasicBlock *Entry = Scope.getEHBlock(); - if (!Entry) { - Entry = CGF.createBasicBlock("eh.cleanup"); - Scope.setEHBlock(Entry); - } - return Entry; -} - -/// Transitions the terminator of the given exit-block of a cleanup to -/// be a cleanup switch. 
-static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, - llvm::BasicBlock *Block) { - // If it's a branch, turn it into a switch whose default - // destination is its original target. - llvm::TerminatorInst *Term = Block->getTerminator(); - assert(Term && "can't transition block without terminator"); - - if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { - assert(Br->isUnconditional()); - llvm::LoadInst *Load = - new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term); - llvm::SwitchInst *Switch = - llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); - Br->eraseFromParent(); - return Switch; - } else { - return cast<llvm::SwitchInst>(Term); - } -} - -/// Attempts to reduce a cleanup's entry block to a fallthrough. This -/// is basically llvm::MergeBlockIntoPredecessor, except -/// simplified/optimized for the tighter constraints on cleanup blocks. -/// -/// Returns the new block, whatever it is. -static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF, - llvm::BasicBlock *Entry) { - llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); - if (!Pred) return Entry; - - llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); - if (!Br || Br->isConditional()) return Entry; - assert(Br->getSuccessor(0) == Entry); - - // If we were previously inserting at the end of the cleanup entry - // block, we'll need to continue inserting at the end of the - // predecessor. - bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; - assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); - - // Kill the branch. - Br->eraseFromParent(); - - // Merge the blocks. - Pred->getInstList().splice(Pred->end(), Entry->getInstList()); - - // Replace all uses of the entry with the predecessor, in case there - // are phis in the cleanup. - Entry->replaceAllUsesWith(Pred); - - // Kill the entry block. - Entry->eraseFromParent(); - - if (WasInsertBlock) - CGF.Builder.SetInsertPoint(Pred); - - return Pred; -} - -static void EmitCleanup(CodeGenFunction &CGF, - EHScopeStack::Cleanup *Fn, - bool ForEH, - llvm::Value *ActiveFlag) { - // EH cleanups always occur within a terminate scope. - if (ForEH) CGF.EHStack.pushTerminate(); - - // If there's an active flag, load it and skip the cleanup if it's - // false. - llvm::BasicBlock *ContBB = 0; - if (ActiveFlag) { - ContBB = CGF.createBasicBlock("cleanup.done"); - llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action"); - llvm::Value *IsActive - = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active"); - CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB); - CGF.EmitBlock(CleanupBB); - } - - // Ask the cleanup to emit itself. - Fn->Emit(CGF, ForEH); - assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); - - // Emit the continuation block if there was an active flag. - if (ActiveFlag) - CGF.EmitBlock(ContBB); - - // Leave the terminate scope. - if (ForEH) CGF.EHStack.popTerminate(); -} - -static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, - llvm::BasicBlock *From, - llvm::BasicBlock *To) { - // Exit is the exit block of a cleanup, so it always terminates in - // an unconditional branch or a switch. 
- llvm::TerminatorInst *Term = Exit->getTerminator(); - - if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { - assert(Br->isUnconditional() && Br->getSuccessor(0) == From); - Br->setSuccessor(0, To); - } else { - llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term); - for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I) - if (Switch->getSuccessor(I) == From) - Switch->setSuccessor(I, To); - } -} - -/// Pops a cleanup block. If the block includes a normal cleanup, the -/// current insertion point is threaded through the cleanup, as are -/// any branch fixups on the cleanup. -void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { - assert(!EHStack.empty() && "cleanup stack is empty!"); - assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); - EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); - assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); - - // Remember activation information. - bool IsActive = Scope.isActive(); - llvm::Value *NormalActiveFlag = - Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0; - llvm::Value *EHActiveFlag = - Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0; - - // Check whether we need an EH cleanup. This is only true if we've - // generated a lazy EH cleanup block. - bool RequiresEHCleanup = Scope.hasEHBranches(); - - // Check the three conditions which might require a normal cleanup: - - // - whether there are branch fix-ups through this cleanup - unsigned FixupDepth = Scope.getFixupDepth(); - bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; - - // - whether there are branch-throughs or branch-afters - bool HasExistingBranches = Scope.hasBranches(); - - // - whether there's a fallthrough - llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); - bool HasFallthrough = (FallthroughSource != 0 && IsActive); - - // Branch-through fall-throughs leave the insertion point set to the - // end of the last cleanup, which points to the current scope. The - // rest of IR gen doesn't need to worry about this; it only happens - // during the execution of PopCleanupBlocks(). - bool HasPrebranchedFallthrough = - (FallthroughSource && FallthroughSource->getTerminator()); - - // If this is a normal cleanup, then having a prebranched - // fallthrough implies that the fallthrough source unconditionally - // jumps here. - assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || - (Scope.getNormalBlock() && - FallthroughSource->getTerminator()->getSuccessor(0) - == Scope.getNormalBlock())); - - bool RequiresNormalCleanup = false; - if (Scope.isNormalCleanup() && - (HasFixups || HasExistingBranches || HasFallthrough)) { - RequiresNormalCleanup = true; - } - - // Even if we don't need the normal cleanup, we might still have - // prebranched fallthrough to worry about. - if (Scope.isNormalCleanup() && !RequiresNormalCleanup && - HasPrebranchedFallthrough) { - assert(!IsActive); - - llvm::BasicBlock *NormalEntry = Scope.getNormalBlock(); - - // If we're branching through this cleanup, just forward the - // prebranched fallthrough to the next cleanup, leaving the insert - // point in the old block. 
- if (FallthroughIsBranchThrough) { - EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); - llvm::BasicBlock *EnclosingEntry = - CreateNormalEntry(*this, cast<EHCleanupScope>(S)); - - ForwardPrebranchedFallthrough(FallthroughSource, - NormalEntry, EnclosingEntry); - assert(NormalEntry->use_empty() && - "uses of entry remain after forwarding?"); - delete NormalEntry; - - // Otherwise, we're branching out; just emit the next block. - } else { - EmitBlock(NormalEntry); - SimplifyCleanupEntry(*this, NormalEntry); - } - } - - // If we don't need the cleanup at all, we're done. - if (!RequiresNormalCleanup && !RequiresEHCleanup) { - EHStack.popCleanup(); // safe because there are no fixups - assert(EHStack.getNumBranchFixups() == 0 || - EHStack.hasNormalCleanups()); - return; - } - - // Copy the cleanup emission data out. Note that SmallVector - // guarantees maximal alignment for its buffer regardless of its - // type parameter. - llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer; - CleanupBuffer.reserve(Scope.getCleanupSize()); - memcpy(CleanupBuffer.data(), - Scope.getCleanupBuffer(), Scope.getCleanupSize()); - CleanupBuffer.set_size(Scope.getCleanupSize()); - EHScopeStack::Cleanup *Fn = - reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data()); - - // We want to emit the EH cleanup after the normal cleanup, but go - // ahead and do the setup for the EH cleanup while the scope is still - // alive. - llvm::BasicBlock *EHEntry = 0; - llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend; - if (RequiresEHCleanup) { - EHEntry = CreateEHEntry(*this, Scope); - - // Figure out the branch-through dest if necessary. - llvm::BasicBlock *EHBranchThroughDest = 0; - if (Scope.hasEHBranchThroughs()) { - assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end()); - EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup()); - EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S)); - } - - // If we have exactly one branch-after and no branch-throughs, we - // can dispatch it without a switch. - if (!Scope.hasEHBranchThroughs() && - Scope.getNumEHBranchAfters() == 1) { - assert(!EHBranchThroughDest); - - // TODO: remove the spurious eh.cleanup.dest stores if this edge - // never went through any switches. - llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0); - EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest)); - - // Otherwise, if we have any branch-afters, we need a switch. - } else if (Scope.getNumEHBranchAfters()) { - // The default of the switch belongs to the branch-throughs if - // they exist. - llvm::BasicBlock *Default = - (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock()); - - const unsigned SwitchCapacity = Scope.getNumEHBranchAfters(); - - llvm::LoadInst *Load = - new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest"); - llvm::SwitchInst *Switch = - llvm::SwitchInst::Create(Load, Default, SwitchCapacity); - - EHInstsToAppend.push_back(Load); - EHInstsToAppend.push_back(Switch); - - for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I) - Switch->addCase(Scope.getEHBranchAfterIndex(I), - Scope.getEHBranchAfterBlock(I)); - - // Otherwise, we have only branch-throughs; jump to the next EH - // cleanup. - } else { - assert(EHBranchThroughDest); - EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest)); - } - } - - if (!RequiresNormalCleanup) { - EHStack.popCleanup(); - } else { - // If we have a fallthrough and no other need for the cleanup, - // emit it directly. 
- if (HasFallthrough && !HasPrebranchedFallthrough && - !HasFixups && !HasExistingBranches) { - - // Fixups can cause us to optimistically create a normal block, - // only to later have no real uses for it. Just delete it in - // this case. - // TODO: we can potentially simplify all the uses after this. - if (Scope.getNormalBlock()) { - Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock()); - delete Scope.getNormalBlock(); - } - - EHStack.popCleanup(); - - EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag); - - // Otherwise, the best approach is to thread everything through - // the cleanup block and then try to clean up after ourselves. - } else { - // Force the entry block to exist. - llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope); - - // I. Set up the fallthrough edge in. - - // If there's a fallthrough, we need to store the cleanup - // destination index. For fall-throughs this is always zero. - if (HasFallthrough) { - if (!HasPrebranchedFallthrough) - Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot()); - - // Otherwise, clear the IP if we don't have fallthrough because - // the cleanup is inactive. We don't need to save it because - // it's still just FallthroughSource. - } else if (FallthroughSource) { - assert(!IsActive && "source without fallthrough for active cleanup"); - Builder.ClearInsertionPoint(); - } - - // II. Emit the entry block. This implicitly branches to it if - // we have fallthrough. All the fixups and existing branches - // should already be branched to it. - EmitBlock(NormalEntry); - - // III. Figure out where we're going and build the cleanup - // epilogue. - - bool HasEnclosingCleanups = - (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); - - // Compute the branch-through dest if we need it: - // - if there are branch-throughs threaded through the scope - // - if fall-through is a branch-through - // - if there are fixups that will be optimistically forwarded - // to the enclosing cleanup - llvm::BasicBlock *BranchThroughDest = 0; - if (Scope.hasBranchThroughs() || - (FallthroughSource && FallthroughIsBranchThrough) || - (HasFixups && HasEnclosingCleanups)) { - assert(HasEnclosingCleanups); - EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); - BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S)); - } - - llvm::BasicBlock *FallthroughDest = 0; - llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend; - - // If there's exactly one branch-after and no other threads, - // we can route it without a switch. - if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && - Scope.getNumBranchAfters() == 1) { - assert(!BranchThroughDest || !IsActive); - - // TODO: clean up the possibly dead stores to the cleanup dest slot. - llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); - InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); - - // Build a switch-out if we need it: - // - if there are branch-afters threaded through the scope - // - if fall-through is a branch-after - // - if there are fixups that have nowhere left to go and - // so must be immediately resolved - } else if (Scope.getNumBranchAfters() || - (HasFallthrough && !FallthroughIsBranchThrough) || - (HasFixups && !HasEnclosingCleanups)) { - - llvm::BasicBlock *Default = - (BranchThroughDest ? 
BranchThroughDest : getUnreachableBlock());
-
- // TODO: base this on the number of branch-afters and fixups
- const unsigned SwitchCapacity = 10;
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- InstsToAppend.push_back(Load);
- InstsToAppend.push_back(Switch);
-
- // Branch-after fallthrough.
- if (FallthroughSource && !FallthroughIsBranchThrough) {
- FallthroughDest = createBasicBlock("cleanup.cont");
- if (HasFallthrough)
- Switch->addCase(Builder.getInt32(0), FallthroughDest);
- }
-
- for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
- Switch->addCase(Scope.getBranchAfterIndex(I),
- Scope.getBranchAfterBlock(I));
- }
-
- // If there aren't any enclosing cleanups, we can resolve all
- // the fixups now.
- if (HasFixups && !HasEnclosingCleanups)
- ResolveAllBranchFixups(*this, Switch, NormalEntry);
- } else {
- // We should always have a branch-through destination in this case.
- assert(BranchThroughDest);
- InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
- }
-
- // IV. Pop the cleanup and emit it.
- EHStack.popCleanup();
- assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
-
- EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
- NormalExit->getInstList().push_back(InstsToAppend[I]);
-
- // Optimistically hope that any fixups will continue falling through.
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I) {
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (!Fixup.Destination) continue;
- if (!Fixup.OptimisticBranchBlock) {
- new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
- getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
- Fixup.InitialBranch->setSuccessor(0, NormalEntry);
- }
- Fixup.OptimisticBranchBlock = NormalExit;
- }
-
- // V. Set up the fallthrough edge out.
-
- // Case 1: a fallthrough source exists but shouldn't branch to
- // the cleanup because the cleanup is inactive.
- if (!HasFallthrough && FallthroughSource) {
- assert(!IsActive);
-
- // If we have a prebranched fallthrough, that needs to be
- // forwarded to the right block.
- if (HasPrebranchedFallthrough) {
- llvm::BasicBlock *Next;
- if (FallthroughIsBranchThrough) {
- Next = BranchThroughDest;
- assert(!FallthroughDest);
- } else {
- Next = FallthroughDest;
- }
-
- ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
- }
- Builder.SetInsertPoint(FallthroughSource);
-
- // Case 2: a fallthrough source exists and should branch to the
- // cleanup, but we're not supposed to branch through to the next
- // cleanup.
- } else if (HasFallthrough && FallthroughDest) {
- assert(!FallthroughIsBranchThrough);
- EmitBlock(FallthroughDest);
-
- // Case 3: a fallthrough source exists and should branch to the
- // cleanup and then through to the next.
- } else if (HasFallthrough) {
- // Everything is already set up for this.
-
- // Case 4: no fallthrough source exists.
- } else {
- Builder.ClearInsertionPoint();
- }
-
- // VI. Assorted cleaning.
-
- // Check whether we can merge NormalEntry into a single predecessor.
- // This might invalidate (non-IR) pointers to NormalEntry.
- llvm::BasicBlock *NewNormalEntry =
- SimplifyCleanupEntry(*this, NormalEntry);
-
- // If it did invalidate those pointers, and NormalEntry was the same
- // as NormalExit, go back and patch up the fixups.
- if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I < E; ++I)
- EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
- }
- }
-
- assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
-
- // Emit the EH cleanup if required.
- if (RequiresEHCleanup) {
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-
- EmitBlock(EHEntry);
- EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
-
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
- EHExit->getInstList().push_back(EHInstsToAppend[I]);
-
- Builder.restoreIP(SavedIP);
-
- SimplifyCleanupEntry(*this, EHEntry);
- }
-}
-
-/// Terminate the current block by emitting a branch which might leave
-/// the current cleanup-protected scope. The target scope may not yet
-/// be known, in which case this will require a fixup.
-///
-/// As a side-effect, this method clears the insertion point.
-void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
- assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
- && "stale jump destination");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active normal cleanup.
- EHScopeStack::stable_iterator
- TopCleanup = EHStack.getInnermostActiveNormalCleanup();
-
- // If we're not in an active normal cleanup scope, or if the
- // destination scope is within the innermost active normal cleanup
- // scope, we don't need to worry about fixups.
- if (TopCleanup == EHStack.stable_end() ||
- TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
- Builder.ClearInsertionPoint();
- return;
- }
-
- // If we can't resolve the destination cleanup scope, just add this
- // to the current cleanup scope as a branch fixup.
- if (!Dest.getScopeDepth().isValid()) {
- BranchFixup &Fixup = EHStack.addBranchFixup();
- Fixup.Destination = Dest.getBlock();
- Fixup.DestinationIndex = Dest.getDestIndex();
- Fixup.InitialBranch = BI;
- Fixup.OptimisticBranchBlock = 0;
-
- Builder.ClearInsertionPoint();
- return;
- }
-
- // Otherwise, thread through all the normal cleanups in scope.
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(TopCleanup));
- BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- EHScopeStack::stable_iterator I = TopCleanup;
- EHScopeStack::stable_iterator E = Dest.getScopeDepth();
- if (E.strictlyEncloses(I)) {
- while (true) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isNormalCleanup());
- I = Scope.getEnclosingNormalCleanup();
-
- // If this is the last cleanup we're propagating through, tell it
- // that there's a resolved jump moving through it.
- if (!E.strictlyEncloses(I)) {
- Scope.addBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, tell the scope that there's a jump propagating
- // through it. If this isn't new information, all the rest of
- // the work has been done before.
- if (!Scope.addBranchThrough(Dest.getBlock()))
- break;
- }
- }
-
- Builder.ClearInsertionPoint();
-}
-
-void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
- // We should never get invalid scope depths for an UnwindDest; that
- // implies that the destination wasn't set up correctly.
- assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active cleanup.
- EHScopeStack::stable_iterator
- InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
-
- // If the destination is in the same EH cleanup scope as us, we
- // don't need to thread through anything.
- if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
- Builder.ClearInsertionPoint();
- return;
- }
- assert(InnermostCleanup != EHStack.stable_end());
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
- BI->setSuccessor(0, CreateEHEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- for (EHScopeStack::stable_iterator
- I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
- assert(E.strictlyEncloses(I));
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isEHCleanup());
- I = Scope.getEnclosingEHCleanup();
-
- // If this is the last cleanup we're propagating through, add this
- // as a branch-after.
- if (I == E) {
- Scope.addEHBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, add it as a branch-through. If this isn't new
- // information, all the rest of the work has been done before.
- if (!Scope.addEHBranchThrough(Dest.getBlock()))
- break;
- }
-
- Builder.ClearInsertionPoint();
-}
-
-/// All the branch fixups on the EH stack have propagated out past the
-/// outermost normal cleanup; resolve them all by adding cases to the
-/// given switch instruction.
-static void ResolveAllBranchFixups(CodeGenFunction &CGF,
- llvm::SwitchInst *Switch,
- llvm::BasicBlock *CleanupEntry) {
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
-
- for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination isn't set.
- BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
- if (Fixup.Destination == 0) continue;
-
- // If there isn't an OptimisticBranchBlock, then InitialBranch is
- // still pointing directly to its destination; forward it to the
- // appropriate cleanup entry. This is required in the specific
- // case of
- // { std::string s; goto lbl; }
- // lbl:
- // i.e. where there's an unresolved fixup inside a single cleanup
- // entry which we're currently popping.
- if (Fixup.OptimisticBranchBlock == 0) {
- new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
- CGF.getNormalCleanupDestSlot(),
- Fixup.InitialBranch);
- Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
- }
-
- // Don't add this case to the switch statement twice.
- if (!CasesAdded.insert(Fixup.Destination)) continue;
-
- Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
- Fixup.Destination);
- }
-
- CGF.EHStack.clearFixups();
-}
-
-void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
- assert(Block && "resolving a null target block");
- if (!EHStack.getNumBranchFixups()) return;
-
- assert(EHStack.hasNormalCleanups() &&
- "branch fixups exist with no normal cleanups on stack");
-
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
- bool ResolvedAny = false;
-
- for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
- // Skip this fixup if its destination doesn't match.
- BranchFixup &Fixup = EHStack.getBranchFixup(I);
- if (Fixup.Destination != Block) continue;
-
- Fixup.Destination = 0;
- ResolvedAny = true;
-
- // If it doesn't have an optimistic branch block, InitialBranch is
- // already pointing to the right place.
- llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
- if (!BranchBB)
- continue;
-
- // Don't process the same optimistic branch block twice.
- if (!ModifiedOptimisticBlocks.insert(BranchBB))
- continue;
-
- llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
-
- // Add a case to the switch.
- Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
- }
-
- if (ResolvedAny)
- EHStack.popNullFixups();
-}
-
-static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
- EHScopeStack::stable_iterator C) {
- // If we needed a normal block for any reason, that counts.
- if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
- return true;
-
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostNormalCleanup();
- I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getNormalBlock()) return true;
- I = S.getEnclosingNormalCleanup();
- }
-
- return false;
-}
-
-static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
- EHScopeStack::stable_iterator C) {
- // If we needed an EH block for any reason, that counts.
- if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
- return true;
-
- // Check whether any enclosed cleanups were needed.
- for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostEHCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getEHBlock()) return true;
- I = S.getEnclosingEHCleanup();
- }
-
- return false;
-}
-
-enum ForActivation_t {
- ForActivation,
- ForDeactivation
-};
-
-/// The given cleanup block is changing activation state. Configure a
-/// cleanup variable if necessary.
-///
-/// It would be good if we had some way of determining if there were
-/// extra uses *after* the change-over point.
-static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
- EHScopeStack::stable_iterator C,
- ForActivation_t Kind) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
-
- // We always need the flag if we're activating the cleanup, because
- // we have to assume that the current location doesn't necessarily
- // dominate all future uses of the cleanup.
- bool NeedFlag = (Kind == ForActivation);
-
- // Calculate whether the cleanup was used:
-
- // - as a normal cleanup
- if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
- Scope.setTestFlagInNormalCleanup();
- NeedFlag = true;
- }
-
- // - as an EH cleanup
- if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
- Scope.setTestFlagInEHCleanup();
- NeedFlag = true;
- }
-
- // If it hasn't yet been used as either, we're done.
- if (!NeedFlag) return;
-
- llvm::AllocaInst *Var = Scope.getActiveFlag();
- if (!Var) {
- Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
- Scope.setActiveFlag(Var);
-
- // Initialize to true or false depending on whether it was
- // active up to this point.
- CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
- }
-
- CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
-}
-
-/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
- assert(C != EHStack.stable_end() && "activating bottom of stack?");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
- assert(!Scope.isActive() && "double activation");
-
- SetupCleanupBlockActivation(*this, C, ForActivation);
-
- Scope.setActive(true);
-}
-
-/// Deactivate a cleanup that was created in an active state.
-void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
- assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
- assert(Scope.isActive() && "double deactivation");
-
- // If it's the top of the stack, just pop it.
- if (C == EHStack.stable_begin()) {
- // If it's a normal cleanup, we need to pretend that the
- // fallthrough is unreachable.
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- PopCleanupBlock();
- Builder.restoreIP(SavedIP);
- return;
- }
-
- // Otherwise, follow the general case.
- SetupCleanupBlockActivation(*this, C, ForDeactivation);
-
- Scope.setActive(false);
-}
-
-llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
- if (!NormalCleanupDest)
- NormalCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return NormalCleanupDest;
-}
-
-llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
- if (!EHCleanupDest)
- EHCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
- return EHCleanupDest;
-}
-
 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
 llvm::Constant *Init) {
 assert (Init && "Invalid DeclRefExpr initializer!");
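The cleanup machinery moved above leans on a few compact techniques that are easiest to see in isolation. First, EHScopeStack packs variably-sized scope records into one buffer that grows downward, so the innermost scope always sits at StartOfData and a walk toward EndOfBuffer visits scopes innermost-first. The standalone sketch below mirrors that allocate-and-walk scheme; it is a simplified stand-in, not the real class, and the Scope record with its explicit Size field is invented for illustration:

// Records of varying size live in one buffer that grows *downward*:
// pushing moves StartOfData toward StartOfBuffer, so the innermost
// scope is always first, just as in EHScopeStack.
#include <cstdio>
#include <cstring>
#include <new>

struct Scope {
  unsigned Size; // total record size, header included
  explicit Scope(unsigned S) : Size(S) {}
};

class ScopeStack {
  char *StartOfBuffer = nullptr; // lowest address of the allocation
  char *StartOfData = nullptr;   // innermost scope record
  char *EndOfBuffer = nullptr;   // one past the outermost record

public:
  ~ScopeStack() { delete[] StartOfBuffer; }

  // Mirrors EHScopeStack::allocate: double the capacity until the new
  // record fits, copying the live region to the *end* of the new buffer.
  void *allocate(size_t Size) {
    if (!StartOfBuffer) {
      size_t Capacity = 1024;
      while (Capacity < Size) Capacity *= 2;
      StartOfBuffer = new char[Capacity];
      StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
    } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
      size_t Used = EndOfBuffer - StartOfData;
      size_t NewCapacity = EndOfBuffer - StartOfBuffer;
      do NewCapacity *= 2; while (NewCapacity < Used + Size);

      char *NewBuffer = new char[NewCapacity];
      memcpy(NewBuffer + NewCapacity - Used, StartOfData, Used);
      delete[] StartOfBuffer;
      StartOfBuffer = NewBuffer;
      EndOfBuffer = NewBuffer + NewCapacity;
      StartOfData = EndOfBuffer - Used;
    }
    StartOfData -= Size;
    return StartOfData;
  }

  void push(unsigned PayloadBytes) {
    unsigned Total = sizeof(Scope) + PayloadBytes;
    new (allocate(Total)) Scope(Total);
  }

  // Walk records innermost-first, the way EHScopeStack::iterator does,
  // advancing by each record's own size.
  void dump() const {
    for (char *P = StartOfData; P != EndOfBuffer;) {
      Scope *S = reinterpret_cast<Scope *>(P);
      printf("scope of %u bytes\n", S->Size);
      P += S->Size;
    }
  }
};

int main() {
  ScopeStack Stack;
  Stack.push(16);
  Stack.push(2048); // forces a reallocation
  Stack.dump();     // prints the 2052-byte (innermost) record first
}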
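Second, EHCatchScope and EHFilterScope emulate a flexible array member, which the comments note is C99-only, by sizing the allocation with getSizeForNum* and addressing the payload at this + 1. A minimal sketch of that trick, with an invented FilterScope stand-in (NumFilters is size_t here purely so the header itself keeps the trailing pointers aligned; the real classes get that alignment from their pointer members):

#include <cassert>
#include <cstdlib>
#include <new>

struct FilterScope {
  size_t NumFilters; // size_t keeps (this + 1) pointer-aligned

  explicit FilterScope(size_t N) : NumFilters(N) {}

  // Size of the header plus the trailing array of filter values.
  static size_t getSizeForNumFilters(size_t N) {
    return sizeof(FilterScope) + N * sizeof(void *);
  }

  // The payload lives immediately after the header object.
  void **getFilters() { return reinterpret_cast<void **>(this + 1); }

  void setFilter(size_t I, void *V) {
    assert(I < NumFilters);
    getFilters()[I] = V;
  }
};

int main() {
  void *Buffer = malloc(FilterScope::getSizeForNumFilters(2));
  FilterScope *Scope = new (Buffer) FilterScope(2);
  Scope->setFilter(0, nullptr); // filter slots are usable immediately
  Scope->setFilter(1, nullptr);
  free(Buffer);
}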
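Third, the branch-after/branch-through distinction documented on EHCleanupScope is easiest to see in source form. In the snippet below, which just expands the examples from those comments, the destructors are the cleanups, and each break is classified separately for every cleanup it crosses:

#include <cstdio>

struct A { ~A() { puts("~A"); } };
struct B { ~B() { puts("~B"); } };

int main() {
  // The break crosses only a's cleanup, and its target lies in the scope
  // immediately enclosing that cleanup: a branch-after for a's scope.
  for (;;) { A a; break; }

  // Here the same break is a branch-through for b's cleanup (control
  // continues on into a's enclosing cleanup) but a branch-after for a's,
  // the last cleanup it crosses before reaching its target.
  for (;;) { A a; B b; break; }
}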
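Fourth, EmitBranchThroughCleanup and PopCleanupBlock thread many different exits through one shared cleanup body: each exit first stores a small integer naming its real target into the cleanup.dest.slot, then branches into the cleanup, and the switch appended after the cleanup dispatches on that integer. A C++ analogue of the protocol, with the enum and function names invented for illustration:

#include <cstdio>

// Each distinct exit out of the cleanup gets its own index; index 0 is
// conventionally the fall-through, matching the Builder.getInt32(0) store.
enum Dest { Fallthrough = 0, LoopExit = 1, ReturnBlock = 2 };

static int leaveThroughCleanup(int DestIndex) {
  int CleanupDest = DestIndex; // the store to cleanup.dest.slot
  puts("cleanup runs once");   // the shared cleanup body

  switch (CleanupDest) {       // the switch PopCleanupBlock appends
  case Fallthrough: return 0;
  case LoopExit:    return 1;
  case ReturnBlock: return 2;
  }
  return -1;                   // default: the "unreachable" destination
}

int main() {
  printf("%d %d\n", leaveThroughCleanup(Fallthrough),
         leaveThroughCleanup(ReturnBlock)); // prints "0 2"
}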
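Finally, the activation machinery (initFullExprCleanup, SetupCleanupBlockActivation, and the flag test in EmitCleanup) reduces to a simple protocol: an i1 slot is written false before each evaluation, written true at the point the cleanup actually comes into existence, and tested when the cleanup would run. A loose C++ analogue, where a bool stands in for the cleanup.cond / cleanup.isactive alloca:

#include <cstdio>

static bool CleanupIsActive = false; // stands in for the i1 alloca

// EmitCleanup with an active flag: load it and skip the body when false.
static void emitCleanup() {
  if (!CleanupIsActive) return;
  puts("running cleanup");
}

static void fullExpression(bool TakeConditionalPath) {
  CleanupIsActive = false;  // stored false before each evaluation
  if (TakeConditionalPath)
    CleanupIsActive = true; // stored true where the temporary is created
  emitCleanup();            // the cleanup tests the flag
}

int main() {
  fullExpression(false); // prints nothing
  fullExpression(true);  // prints "running cleanup"
}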