-rw-r--r-- | clang/lib/CodeGen/CGClass.cpp         |  11
-rw-r--r-- | clang/lib/CodeGen/CGDecl.cpp          | 270
-rw-r--r-- | clang/lib/CodeGen/CGExprAgg.cpp       | 159
-rw-r--r-- | clang/lib/CodeGen/CGObjC.cpp          | 171
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.cpp |  78
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.h   |  56
-rw-r--r-- | clang/test/CodeGenCXX/destructors.cpp |  25
-rw-r--r-- | clang/test/CodeGenCXX/temporaries.cpp |  22
-rw-r--r-- | clang/test/CodeGenCXX/value-init.cpp  |  76
-rw-r--r-- | clang/test/CodeGenObjC/arc.m          |  38
10 files changed, 618 insertions, 288 deletions
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp index 313ee57f7ff..911dbf782f3 100644 --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -1172,6 +1172,17 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, EmitBlock(AfterFor, true); } +void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF, + llvm::Value *addr, + QualType type) { + const RecordType *rtype = type->castAs<RecordType>(); + const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl()); + const CXXDestructorDecl *dtor = record->getDestructor(); + assert(!dtor->isTrivial()); + CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, + addr); +} + /// EmitCXXAggrDestructorCall - calls the default destructor on array /// elements in reverse order of construction. void diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index 95294bf8e58..5ce7b3374e9 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -304,30 +304,25 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D, } namespace { - struct CallArrayDtor : EHScopeStack::Cleanup { - CallArrayDtor(const CXXDestructorDecl *Dtor, - const ConstantArrayType *Type, - llvm::Value *Loc) - : Dtor(Dtor), Type(Type), Loc(Loc) {} + struct DestroyObject : EHScopeStack::Cleanup { + DestroyObject(llvm::Value *addr, QualType type, + CodeGenFunction::Destroyer *destroyer) + : addr(addr), type(type), destroyer(*destroyer) {} - const CXXDestructorDecl *Dtor; - const ConstantArrayType *Type; - llvm::Value *Loc; + llvm::Value *addr; + QualType type; + CodeGenFunction::Destroyer &destroyer; void Emit(CodeGenFunction &CGF, bool IsForEH) { - QualType BaseElementTy = CGF.getContext().getBaseElementType(Type); - const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy); - BasePtr = llvm::PointerType::getUnqual(BasePtr); - llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr); - CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr); + CGF.emitDestroy(addr, type, destroyer); } }; - struct CallVarDtor : EHScopeStack::Cleanup { - CallVarDtor(const CXXDestructorDecl *Dtor, - llvm::Value *NRVOFlag, - llvm::Value *Loc) - : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {} + struct DestroyNRVOVariable : EHScopeStack::Cleanup { + DestroyNRVOVariable(llvm::Value *addr, + const CXXDestructorDecl *Dtor, + llvm::Value *NRVOFlag) + : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {} const CXXDestructorDecl *Dtor; llvm::Value *NRVOFlag; @@ -1014,6 +1009,59 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, } } +/// Enter a destroy cleanup for the given local variable. +void CodeGenFunction::emitAutoVarTypeCleanup( + const CodeGenFunction::AutoVarEmission &emission, + QualType::DestructionKind dtorKind) { + assert(dtorKind != QualType::DK_none); + + // Note that for __block variables, we want to destroy the + // original stack object, not the possibly forwarded object. + llvm::Value *addr = emission.getObjectAddress(*this); + + const VarDecl *var = emission.Variable; + QualType type = var->getType(); + + CleanupKind cleanupKind = NormalAndEHCleanup; + CodeGenFunction::Destroyer *destroyer = 0; + + switch (dtorKind) { + case QualType::DK_none: + llvm_unreachable("no cleanup for trivially-destructible variable"); + + case QualType::DK_cxx_destructor: + // If there's an NRVO flag on the emission, we need a different + // cleanup. 
+ if (emission.NRVOFlag) { + assert(!type->isArrayType()); + CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor(); + EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor, + emission.NRVOFlag); + return; + } + break; + + case QualType::DK_objc_strong_lifetime: + // Suppress cleanups for pseudo-strong variables. + if (var->isARCPseudoStrong()) return; + + // Otherwise, consider whether to use an EH cleanup or not. + cleanupKind = getARCCleanupKind(); + + // Use the imprecise destroyer by default. + if (!var->hasAttr<ObjCPreciseLifetimeAttr>()) + destroyer = CodeGenFunction::destroyARCStrongImprecise; + break; + + case QualType::DK_objc_weak_lifetime: + break; + } + + // If we haven't chosen a more specific destroyer, use the default. + if (!destroyer) destroyer = &getDestroyer(dtorKind); + EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer); +} + void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) { assert(emission.Variable && "emission was not valid!"); @@ -1022,44 +1070,9 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) { const VarDecl &D = *emission.Variable; - // Handle C++ or ARC destruction of variables. - if (getLangOptions().CPlusPlus) { - QualType type = D.getType(); - QualType baseType = getContext().getBaseElementType(type); - if (const RecordType *RT = baseType->getAs<RecordType>()) { - CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); - if (!ClassDecl->hasTrivialDestructor()) { - // Note: We suppress the destructor call when the corresponding NRVO - // flag has been set. - - // Note that for __block variables, we want to destroy the - // original stack object, not the possible forwarded object. - llvm::Value *Loc = emission.getObjectAddress(*this); - - const CXXDestructorDecl *D = ClassDecl->getDestructor(); - assert(D && "EmitLocalBlockVarDecl - destructor is nul"); - - if (type != baseType) { - const ConstantArrayType *Array = - getContext().getAsConstantArrayType(type); - assert(Array && "types changed without array?"); - EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup, - D, Array, Loc); - } else { - EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup, - D, emission.NRVOFlag, Loc); - } - } - } - } - - if (Qualifiers::ObjCLifetime lifetime - = D.getType().getQualifiers().getObjCLifetime()) { - if (!D.isARCPseudoStrong()) { - llvm::Value *loc = emission.getObjectAddress(*this); - EmitAutoVarWithLifetime(*this, D, loc, lifetime); - } - } + // Check the type for a cleanup. + if (QualType::DestructionKind dtorKind = D.getType().isDestructedType()) + emitAutoVarTypeCleanup(emission, dtorKind); // In GC mode, honor objc_precise_lifetime. 
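For orientation, a minimal source-level example (names invented, not part of the patch) of the locals the unified isDestructedType() path above now covers: the C++ destructor case pushes a DestroyObject cleanup, and the array case reaches emitDestroy.

    // Hypothetical example only: locals whose types are destructible and
    // therefore get cleanups from emitAutoVarTypeCleanup.
    struct Widget {
      ~Widget() {}     // user-provided, hence non-trivial, destructor
    };

    void useWidgets() {
      Widget w;        // DK_cxx_destructor: single-object DestroyObject cleanup
      Widget many[8];  // same destruction kind; emitDestroy loops over the array
    }                  // cleanups run here, in reverse declaration order
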
if (getLangOptions().getGCMode() != LangOptions::NonGC && @@ -1084,6 +1097,151 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) { enterByrefCleanup(emission); } +CodeGenFunction::Destroyer & +CodeGenFunction::getDestroyer(QualType::DestructionKind kind) { + switch (kind) { + case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor"); + case QualType::DK_cxx_destructor: return destroyCXXObject; + case QualType::DK_objc_strong_lifetime: return destroyARCStrongPrecise; + case QualType::DK_objc_weak_lifetime: return destroyARCWeak; + } +} + +void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr, + QualType type, Destroyer &destroyer) { + EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer); +} + +void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type, + Destroyer &destroyer) { + const ArrayType *arrayType = getContext().getAsArrayType(type); + if (!arrayType) + return destroyer(*this, addr, type); + + llvm::Value *begin = addr; + llvm::Value *length = emitArrayLength(arrayType, type, begin); + llvm::Value *end = Builder.CreateInBoundsGEP(begin, length); + emitArrayDestroy(begin, end, type, destroyer); +} + +void CodeGenFunction::emitArrayDestroy(llvm::Value *begin, + llvm::Value *end, + QualType type, + Destroyer &destroyer) { + assert(!type->isArrayType()); + + // The basic structure here is a do-while loop, because we don't + // need to check for the zero-element case. + llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body"); + llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done"); + + // Enter the loop body, making that address the current address. + llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); + EmitBlock(bodyBB); + llvm::PHINode *elementPast = + Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast"); + elementPast->addIncoming(end, entryBB); + + // Shift the address back by one element. + llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true); + llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne, + "arraydestroy.element"); + + // Perform the actual destruction there. + destroyer(*this, element, type); + + // Check whether we've reached the end. + llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done"); + Builder.CreateCondBr(done, doneBB, bodyBB); + elementPast->addIncoming(element, Builder.GetInsertBlock()); + + // Done. + EmitBlock(doneBB); +} + +namespace { + class PartialArrayDestroy : public EHScopeStack::Cleanup { + llvm::Value *ArrayBegin; + llvm::Value *ArrayEndPointer; + QualType ElementType; + CodeGenFunction::Destroyer &Destroyer; + public: + PartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEndPointer, + QualType elementType, + CodeGenFunction::Destroyer *destroyer) + : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer), + ElementType(elementType), Destroyer(*destroyer) {} + + void Emit(CodeGenFunction &CGF, bool isForEH) { + llvm::Value *arrayBegin = ArrayBegin; + llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer); + + // It's possible for the count to be zero here, so we're going + // to need a check. For the sake of prettier IR, we just want + // to jump to the end of the array destroy loop. This assumes + // the structure of the IR generated by emitArrayDestroy, but + // that assumption is pretty reliable. 
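A rough C++ rendering of the loop emitArrayDestroy builds in IR, as a sketch only: the do-while shape assumes the caller guarantees a non-empty range, the destroyer is shown as a direct destructor call, and the storage is assumed to be manually managed (e.g. placement-new'd).

    struct Elem { ~Elem() {} };

    // Sketch of the emitted control flow: walk backwards from 'end' to
    // 'begin', destroying one element per iteration ("arraydestroy.body"),
    // and exit once the element just destroyed is 'begin'.
    void destroyArrayInReverse(Elem *begin, Elem *end) {
      Elem *elementPast = end;              // the PHI: one past the live element
      do {
        Elem *element = elementPast - 1;    // step back by one element
        element->~Elem();                   // destroyer(CGF, element, type)
        elementPast = element;
      } while (elementPast != begin);       // the "arraydestroy.done" check
    }
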
+ llvm::Value *earlyTest = + CGF.Builder.CreateICmpEQ(arrayBegin, arrayEnd, "pad.isempty"); + + llvm::BasicBlock *nextBB = CGF.createBasicBlock("pad.arraydestroy"); + + // For now, use a conditional branch with both successors the + // same. We'll patch this later. + llvm::BranchInst *br = + CGF.Builder.CreateCondBr(earlyTest, nextBB, nextBB); + CGF.EmitBlock(nextBB); + + llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); + + // If the element type is itself an array, drill down. + QualType type = ElementType; + llvm::SmallVector<llvm::Value*,4> gepIndices; + gepIndices.push_back(zero); + while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) { + // VLAs don't require a GEP index to walk into. + if (!isa<VariableArrayType>(arrayType)) + gepIndices.push_back(zero); + type = arrayType->getElementType(); + } + if (gepIndices.size() != 1) { + arrayBegin = + CGF.Builder.CreateInBoundsGEP(arrayBegin, gepIndices.begin(), + gepIndices.end(), "pad.arraybegin"); + arrayEnd = + CGF.Builder.CreateInBoundsGEP(arrayEnd, gepIndices.begin(), + gepIndices.end(), "pad.arrayend"); + } + + CGF.emitArrayDestroy(arrayBegin, arrayEnd, type, Destroyer); + + // Set the conditional branch's 'false' successor to doneBB. + llvm::BasicBlock *doneBB = CGF.Builder.GetInsertBlock(); + assert(CGF.Builder.GetInsertPoint() == doneBB->begin()); + br->setSuccessor(1, doneBB); + } + }; +} + +/// pushPartialArrayCleanup - Push a cleanup to destroy +/// already-constructed elements of the given array. The cleanup +/// may be popped with DeactivateCleanupBlock. +/// +/// \param elementType - the immediate element type of the array; +/// possibly still an array type +/// \param array - a value of type elementType* +/// \param destructionKind - the kind of destruction required +/// \param initializedElementCount - a value of type size_t* holding +/// the number of successfully-constructed elements +void CodeGenFunction::pushPartialArrayCleanup(llvm::Value *array, + QualType elementType, + Destroyer &destroyer, + llvm::Value *arrayEndPointer) { + // FIXME: can this be in a conditional expression? + EHStack.pushCleanup<PartialArrayDestroy>(EHCleanup, array, arrayEndPointer, + elementType, &destroyer); +} + namespace { /// A cleanup to perform a release of an object at the end of a /// function. 
This is used to balance out the incoming +1 of a diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 93f93b77e80..1d473f6012e 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -661,45 +661,132 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { } uint64_t NumArrayElements = AType->getNumElements(); - QualType ElementType = CGF.getContext().getCanonicalType(E->getType()); - ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType(); - ElementType = CGF.getContext().getQualifiedType(ElementType, - Dest.getQualifiers()); - - bool hasNonTrivialCXXConstructor = false; - if (CGF.getContext().getLangOptions().CPlusPlus) - if (const RecordType *RT = CGF.getContext() - .getBaseElementType(ElementType)->getAs<RecordType>()) { - const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); - hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor(); - } + assert(NumInitElements <= NumArrayElements); + + QualType elementType = E->getType().getCanonicalType(); + elementType = CGF.getContext().getQualifiedType( + cast<ArrayType>(elementType)->getElementType(), + elementType.getQualifiers() + Dest.getQualifiers()); + + // DestPtr is an array*. Construct an elementType* by drilling + // down a level. + llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); + llvm::Value *indices[] = { zero, zero }; + llvm::Value *begin = + Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin"); + + // Exception safety requires us to destroy all the + // already-constructed members if an initializer throws. + // For that, we'll need an EH cleanup. + QualType::DestructionKind dtorKind = elementType.isDestructedType(); + llvm::AllocaInst *endOfInit = 0; + EHScopeStack::stable_iterator cleanup; + if (CGF.needsEHCleanup(dtorKind)) { + // In principle we could tell the cleanup where we are more + // directly, but the control flow can get so varied here that it + // would actually be quite complex. Therefore we go through an + // alloca. + endOfInit = CGF.CreateTempAlloca(begin->getType(), + "arrayinit.endOfInit"); + Builder.CreateStore(begin, endOfInit); + CGF.pushPartialArrayCleanup(begin, elementType, + CGF.getDestroyer(dtorKind), endOfInit); + cleanup = CGF.EHStack.stable_begin(); + + // Otherwise, remember that we didn't need a cleanup. + } else { + dtorKind = QualType::DK_none; + } - for (uint64_t i = 0; i != NumArrayElements; ++i) { - // If we're done emitting initializers and the destination is known-zeroed - // then we're done. - if (i == NumInitElements && - Dest.isZeroed() && - CGF.getTypes().isZeroInitializable(ElementType) && - !hasNonTrivialCXXConstructor) - break; - - llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array"); - LValue LV = CGF.MakeAddrLValue(NextVal, ElementType); - - if (i < NumInitElements) - EmitInitializationToLValue(E->getInit(i), LV); - else if (Expr *filler = E->getArrayFiller()) - EmitInitializationToLValue(filler, LV); + llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1); + + // The 'current element to initialize'. The invariants on this + // variable are complicated. Essentially, after each iteration of + // the loop, it points to the last initialized element, except + // that it points to the beginning of the array before any + // elements have been initialized. + llvm::Value *element = begin; + + // Emit the explicit initializers. + for (uint64_t i = 0; i != NumInitElements; ++i) { + // Advance to the next element. 
+ if (i > 0) + element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element"); + + LValue elementLV = CGF.MakeAddrLValue(element, elementType); + EmitInitializationToLValue(E->getInit(i), elementLV); + + // Tell the cleanup that it needs to destroy this element. + // TODO: some of these stores can be trivially observed to be + // unnecessary. + if (endOfInit) Builder.CreateStore(element, endOfInit); + } + + // Check whether there's a non-trivial array-fill expression. + // Note that this will be a CXXConstructExpr even if the element + // type is an array (or array of array, etc.) of class type. + Expr *filler = E->getArrayFiller(); + bool hasTrivialFiller = true; + if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) { + assert(cons->getConstructor()->isDefaultConstructor()); + hasTrivialFiller = cons->getConstructor()->isTrivial(); + } + + // Any remaining elements need to be zero-initialized, possibly + // using the filler expression. We can skip this if the we're + // emitting to zeroed memory. + if (NumInitElements != NumArrayElements && + !(Dest.isZeroed() && hasTrivialFiller && + CGF.getTypes().isZeroInitializable(elementType))) { + + // Use an actual loop. This is basically + // do { *array++ = filler; } while (array != end); + + // Advance to the start of the rest of the array. + if (NumInitElements) + element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start"); + + // Compute the end of the array. + llvm::Value *end = Builder.CreateInBoundsGEP(begin, + llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), + "arrayinit.end"); + + llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); + llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); + + // Jump into the body. + CGF.EmitBlock(bodyBB); + llvm::PHINode *currentElement = + Builder.CreatePHI(element->getType(), 2, "arrayinit.cur"); + currentElement->addIncoming(element, entryBB); + + // Emit the actual filler expression. + LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType); + if (filler) + EmitInitializationToLValue(filler, elementLV); else - EmitNullInitializationToLValue(LV); - - // If the GEP didn't get used because of a dead zero init or something - // else, clean it up for -O0 builds and general tidiness. - if (llvm::GetElementPtrInst *GEP = - dyn_cast<llvm::GetElementPtrInst>(NextVal)) - if (GEP->use_empty()) - GEP->eraseFromParent(); + EmitNullInitializationToLValue(elementLV); + + // Tell the EH cleanup that we finished with that element. + if (endOfInit) Builder.CreateStore(element, endOfInit); + + // Move on to the next element. + llvm::Value *nextElement = + Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next"); + + // Leave the loop if we're done. + llvm::Value *done = Builder.CreateICmpEQ(nextElement, end, + "arrayinit.done"); + llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end"); + Builder.CreateCondBr(done, endBB, bodyBB); + currentElement->addIncoming(nextElement, Builder.GetInsertBlock()); + + CGF.EmitBlock(endBB); } + + // Leave the partial-array cleanup if we entered one. 
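The endOfInit/partial-cleanup protocol above corresponds, roughly, to the following hand-written C++. This is a sketch with invented names: it uses placement new and an explicit catch where the real code pushes an EH cleanup and deactivates it once initialization finishes.

    #include <new>
    #include <cstddef>

    struct Elt { Elt() {} Elt(int) {} ~Elt() {} };

    // Initialize 'Elt buf[n] = { 1, 2 }' exception-safely (assumes n >= 2):
    // keep a pointer to the end of the constructed prefix so only those
    // elements are torn down, in reverse, if a later constructor throws.
    void initArray(Elt *begin, std::size_t n) {
      Elt *endOfInit = begin;                     // "arrayinit.endOfInit"
      try {
        new (endOfInit) Elt(1); ++endOfInit;      // explicit initializers
        new (endOfInit) Elt(2); ++endOfInit;
        while (endOfInit != begin + n) {          // filler / value-init loop
          new (endOfInit) Elt(); ++endOfInit;
        }
      } catch (...) {
        while (endOfInit != begin)                // the partial-array cleanup
          (--endOfInit)->~Elt();
        throw;
      }
    }
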
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup); + return; } diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp index 1b271ef2af8..be38d36f6c5 100644 --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -1807,25 +1807,43 @@ void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { getContext().VoidTy, DrainSel, Arg, Args); } +void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, + llvm::Value *addr, + QualType type) { + llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy"); + CGF.EmitARCRelease(ptr, /*precise*/ true); +} + +void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, + llvm::Value *addr, + QualType type) { + llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy"); + CGF.EmitARCRelease(ptr, /*precise*/ false); +} + +void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, + llvm::Value *addr, + QualType type) { + CGF.EmitARCDestroyWeak(addr); +} + namespace { struct ObjCReleasingCleanup : EHScopeStack::Cleanup { private: QualType type; llvm::Value *addr; + CodeGenFunction::Destroyer &destroyer; protected: - ObjCReleasingCleanup(QualType type, llvm::Value *addr) - : type(type), addr(addr) {} + ObjCReleasingCleanup(QualType type, llvm::Value *addr, + CodeGenFunction::Destroyer *destroyer) + : type(type), addr(addr), destroyer(*destroyer) {} virtual llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *addr) { return addr; } - virtual void release(CodeGenFunction &CGF, - QualType type, - llvm::Value *addr) = 0; - public: void Emit(CodeGenFunction &CGF, bool isForEH) { const ArrayType *arrayType = CGF.getContext().getAsArrayType(type); @@ -1834,14 +1852,13 @@ namespace { // If we don't have an array type, this is easy. if (!arrayType) - return release(CGF, type, addr); + return destroyer(CGF, addr, type); llvm::Value *begin = addr; QualType baseType; // Otherwise, this is more painful. - llvm::Value *count = emitArrayLength(CGF, arrayType, baseType, - begin); + llvm::Value *count = CGF.emitArrayLength(arrayType, baseType, begin); assert(baseType == CGF.getContext().getBaseElementType(arrayType)); @@ -1867,7 +1884,7 @@ namespace { CGF.EmitBlock(bodyBB); // Release the value at 'cur'. - release(CGF, baseType, cur); + destroyer(CGF, cur, baseType); // ++cur; // goto loopBB; @@ -1878,112 +1895,18 @@ namespace { // endBB: CGF.EmitBlock(endBB); } - - private: - /// Computes the length of an array in elements, as well - /// as the base - static llvm::Value *emitArrayLength(CodeGenFunction &CGF, - const ArrayType *origArrayType, - QualType &baseType, - llvm::Value *&addr) { - ASTContext &Ctx = CGF.getContext(); - const ArrayType *arrayType = origArrayType; - - // If it's a VLA, we have to load the stored size. Note that - // this is the size of the VLA in bytes, not its size in elements. - llvm::Value *numVLAElements = 0; - if (isa<VariableArrayType>(arrayType)) { - numVLAElements = - CGF.getVLASize(cast<VariableArrayType>(arrayType)).first; - - // Walk into all VLAs. This doesn't require changes to addr, - // which has type T* where T is the first non-VLA element type. - do { - QualType elementType = arrayType->getElementType(); - arrayType = Ctx.getAsArrayType(elementType); - - // If we only have VLA components, 'addr' requires no adjustment. - if (!arrayType) { - baseType = elementType; - return numVLAElements; - } - } while (isa<VariableArrayType>(arrayType)); - - // We get out here only if we find a constant array type - // inside the VLA. 
- } - - // We have some number of constant-length arrays, so addr should - // have LLVM type [M x [N x [...]]]*. Build a GEP that walks - // down to the first element of addr. - llvm::SmallVector<llvm::Value*, 8> gepIndices; - - // GEP down to the array type. - llvm::ConstantInt *zero = CGF.Builder.getInt32(0); - gepIndices.push_back(zero); - - // It's more efficient to calculate the count from the LLVM - // constant-length arrays than to re-evaluate the array bounds. - uint64_t countFromCLAs = 1; - - const llvm::ArrayType *llvmArrayType = - cast<llvm::ArrayType>( - cast<llvm::PointerType>(addr->getType())->getElementType()); - while (true) { - assert(isa<ConstantArrayType>(arrayType)); - assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() - == llvmArrayType->getNumElements()); - - gepIndices.push_back(zero); - countFromCLAs *= llvmArrayType->getNumElements(); - - llvmArrayType = - dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); - if (!llvmArrayType) break; - - arrayType = Ctx.getAsArrayType(arrayType->getElementType()); - assert(arrayType && "LLVM and Clang types are out-of-synch"); - } - - baseType = arrayType->getElementType(); - - // Create the actual GEP. - addr = CGF.Builder.CreateInBoundsGEP(addr, gepIndices.begin(), - gepIndices.end(), "array.begin"); - - llvm::Value *numElements - = llvm::ConstantInt::get(CGF.IntPtrTy, countFromCLAs); - - // If we had any VLA dimensions, factor them in. - if (numVLAElements) - numElements = CGF.Builder.CreateNUWMul(numVLAElements, numElements); - - return numElements; - } - - static llvm::Value *divideVLASizeByBaseType(CodeGenFunction &CGF, - llvm::Value *vlaSizeInBytes, - QualType baseType) { - // Divide the base type size back out of the - CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType); - llvm::Value *baseSizeInBytes = - llvm::ConstantInt::get(vlaSizeInBytes->getType(), - baseSize.getQuantity()); - - return CGF.Builder.CreateUDiv(vlaSizeInBytes, baseSizeInBytes, - "array.vla-count"); - } }; /// A cleanup that calls @objc_release on all the objects to release. struct CallReleaseForObject : ObjCReleasingCleanup { - bool precise; - CallReleaseForObject(QualType type, llvm::Value *addr, bool precise) - : ObjCReleasingCleanup(type, addr), precise(precise) {} + CallReleaseForObject(QualType type, llvm::Value *addr, + CodeGenFunction::Destroyer *destroyer) + : ObjCReleasingCleanup(type, addr, destroyer) {} using ObjCReleasingCleanup::Emit; static void Emit(CodeGenFunction &CGF, bool IsForEH, - QualType type, llvm::Value *addr, bool precise) { + QualType type, llvm::Value *addr, + CodeGenFunction::Destroyer *destroyer) { // EHScopeStack::Cleanup objects can never have their destructors called, // so use placement new to construct our temporary object. 
union { @@ -1992,15 +1915,10 @@ namespace { }; CallReleaseForObject *Object - = new (&align) CallReleaseForObject(type, addr, precise); + = new (&align) CallReleaseForObject(type, addr, destroyer); Object->Emit(CGF, IsForEH); (void)data[0]; } - - void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) { - llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "tmp"); - CGF.EmitARCRelease(ptr, precise); - } }; /// A cleanup that calls @objc_storeStrong(nil) on all the objects to @@ -2008,7 +1926,8 @@ namespace { struct CallReleaseForIvar : ObjCReleasingCleanup { const ObjCIvarDecl *ivar; CallReleaseForIvar(const ObjCIvarDecl *ivar, llvm::Value *self) - : ObjCReleasingCleanup(ivar->getType(), self), ivar(ivar) {} + : ObjCReleasingCleanup(ivar->getType(), self, + destroyARCStrongIvar), ivar(ivar) {} llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *addr) { LValue lvalue @@ -2016,8 +1935,9 @@ namespace { return lvalue.getAddress(); } - void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) { - // Release ivars by storing nil into them; it just makes things easier. + static void destroyARCStrongIvar(CodeGenFunction &CGF, + llvm::Value *addr, + QualType type) { llvm::Value *null = getNullForVariable(addr); CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true); } @@ -2029,7 +1949,8 @@ namespace { const FieldDecl *Field; explicit CallReleaseForField(const FieldDecl *Field) - : CallReleaseForObject(Field->getType(), 0, /*precise=*/true), + : CallReleaseForObject(Field->getType(), 0, + CodeGenFunction::destroyARCStrongPrecise), Field(Field) { } llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *) { @@ -2043,7 +1964,7 @@ namespace { /// release in an object. struct CallWeakReleaseForObject : ObjCReleasingCleanup { CallWeakReleaseForObject(QualType type, llvm::Value *addr) - : ObjCReleasingCleanup(type, addr) {} + : ObjCReleasingCleanup(type, addr, CodeGenFunction::destroyARCWeak) {} using ObjCReleasingCleanup::Emit; static void Emit(CodeGenFunction &CGF, bool IsForEH, @@ -2060,10 +1981,6 @@ namespace { Object->Emit(CGF, IsForEH); (void)data[0]; } - - void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) { - CGF.EmitARCDestroyWeak(addr); - } }; @@ -2129,10 +2046,12 @@ void CodeGenFunction::PushARCReleaseCleanup(CleanupKind cleanupKind, llvm::Value *addr, bool precise, bool forFullExpr) { + Destroyer *dtor = + (precise ? destroyARCStrongPrecise : destroyARCStrongImprecise); if (forFullExpr) - pushFullExprCleanup<CallReleaseForObject>(cleanupKind, type, addr, precise); + pushFullExprCleanup<CallReleaseForObject>(cleanupKind, type, addr, dtor); else - EHStack.pushCleanup<CallReleaseForObject>(cleanupKind, type, addr, precise); + EHStack.pushCleanup<CallReleaseForObject>(cleanupKind, type, addr, dtor); } /// PushARCWeakReleaseCleanup - Enter a cleanup to perform a weak diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 43e07e24fc6..596358bee5d 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -785,6 +785,84 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { return IndirectBranch->getParent(); } +/// Computes the length of an array in elements, as well as the base +/// element type and a properly-typed first element pointer. +llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, + QualType &baseType, + llvm::Value *&addr) { + const ArrayType *arrayType = origArrayType; + + // If it's a VLA, we have to load the stored size. 
Note that + // this is the size of the VLA in bytes, not its size in elements. + llvm::Value *numVLAElements = 0; + if (isa<VariableArrayType>(arrayType)) { + numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first; + + // Walk into all VLAs. This doesn't require changes to addr, + // which has type T* where T is the first non-VLA element type. + do { + QualType elementType = arrayType->getElementType(); + arrayType = getContext().getAsArrayType(elementType); + + // If we only have VLA components, 'addr' requires no adjustment. + if (!arrayType) { + baseType = elementType; + return numVLAElements; + } + } while (isa<VariableArrayType>(arrayType)); + + // We get out here only if we find a constant array type + // inside the VLA. + } + + // We have some number of constant-length arrays, so addr should + // have LLVM type [M x [N x [...]]]*. Build a GEP that walks + // down to the first element of addr. + llvm::SmallVector<llvm::Value*, 8> gepIndices; + + // GEP down to the array type. + llvm::ConstantInt *zero = Builder.getInt32(0); + gepIndices.push_back(zero); + + // It's more efficient to calculate the count from the LLVM + // constant-length arrays than to re-evaluate the array bounds. + uint64_t countFromCLAs = 1; + + const llvm::ArrayType *llvmArrayType = + cast<llvm::ArrayType>( + cast<llvm::PointerType>(addr->getType())->getElementType()); + while (true) { + assert(isa<ConstantArrayType>(arrayType)); + assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() + == llvmArrayType->getNumElements()); + + gepIndices.push_back(zero); + countFromCLAs *= llvmArrayType->getNumElements(); + + llvmArrayType = + dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); + if (!llvmArrayType) break; + + arrayType = getContext().getAsArrayType(arrayType->getElementType()); + assert(arrayType && "LLVM and Clang types are out-of-synch"); + } + + baseType = arrayType->getElementType(); + + // Create the actual GEP. + addr = Builder.CreateInBoundsGEP(addr, gepIndices.begin(), + gepIndices.end(), "array.begin"); + + llvm::Value *numElements + = llvm::ConstantInt::get(SizeTy, countFromCLAs); + + // If we had any VLA dimensions, factor them in. 
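The count that emitArrayLength returns is simply the product of the constant array bounds, multiplied by the VLA element count when one is present. A reduced sketch of that arithmetic follows; the helper is invented, and the constant bounds are passed in directly rather than read from the Clang and LLVM types as the real code does.

    #include <cstdint>

    // For a type like 'int a[n][4][5]', dims = {4, 5} and vlaCount = n,
    // giving n * 4 * 5 elements of the base type 'int'.
    uint64_t arrayElementCount(const uint64_t *dims, unsigned numDims,
                               uint64_t vlaCount /* 1 if no VLA prefix */) {
      uint64_t countFromCLAs = 1;
      for (unsigned i = 0; i != numDims; ++i)
        countFromCLAs *= dims[i];          // constant-length dimensions
      return vlaCount * countFromCLAs;     // factor in the VLA dimensions
    }
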
+ if (numVLAElements) + numElements = Builder.CreateNUWMul(numVLAElements, numElements); + + return numElements; +} + std::pair<llvm::Value*, QualType> CodeGenFunction::getVLASize(QualType type) { const VariableArrayType *vla = getContext().getAsVariableArrayType(type); diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 751dd9b9bbf..fa12f563748 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -1151,6 +1151,40 @@ public: llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); } //===--------------------------------------------------------------------===// + // Cleanups + //===--------------------------------------------------------------------===// + + typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty); + + void pushPartialArrayCleanup(llvm::Value *arrayBegin, + QualType elementType, + Destroyer &destroyer, + llvm::Value *arrayEndPointer); + + Destroyer &getDestroyer(QualType::DestructionKind destructionKind); + void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type, + Destroyer &destroyer); + void emitDestroy(llvm::Value *addr, QualType type, Destroyer &destroyer); + void emitArrayDestroy(llvm::Value *begin, llvm::Value *end, + QualType type, Destroyer &destroyer); + + /// Determines whether an EH cleanup is required to destroy a type + /// with the given destruction kind. + bool needsEHCleanup(QualType::DestructionKind kind) { + switch (kind) { + case QualType::DK_none: + return false; + case QualType::DK_cxx_destructor: + case QualType::DK_objc_weak_lifetime: + return getLangOptions().Exceptions; + case QualType::DK_objc_strong_lifetime: + return getLangOptions().Exceptions && + CGM.getCodeGenOpts().ObjCAutoRefCountExceptions; + } + llvm_unreachable("bad destruction kind"); + } + + //===--------------------------------------------------------------------===// // Objective-C //===--------------------------------------------------------------------===// @@ -1526,6 +1560,12 @@ public: // instruction in LLVM instead once it works well enough. llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty); + /// emitArrayLength - Compute the length of an array, even if it's a + /// VLA, and drill down to the base element type. + llvm::Value *emitArrayLength(const ArrayType *arrayType, + QualType &baseType, + llvm::Value *&addr); + /// EmitVLASize - Capture all the sizes for the VLA expressions in /// the given variably-modified type and store them in the VLASizeMap. 
/// @@ -1616,6 +1656,8 @@ public: const ArrayType *Array, llvm::Value *This); + static Destroyer destroyCXXObject; + void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D, llvm::Value *NumElements, llvm::Value *This); @@ -1720,6 +1762,8 @@ public: AutoVarEmission EmitAutoVarAlloca(const VarDecl &var); void EmitAutoVarInit(const AutoVarEmission &emission); void EmitAutoVarCleanups(const AutoVarEmission &emission); + void emitAutoVarTypeCleanup(const AutoVarEmission &emission, + QualType::DestructionKind dtorKind); void EmitStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage); @@ -2101,8 +2145,20 @@ public: void PushARCReleaseCleanup(CleanupKind kind, QualType type, llvm::Value *addr, bool precise, bool forFullExpr = false); + void PushARCArrayReleaseCleanup(CleanupKind kind, QualType elementType, + llvm::Value *addr, + llvm::Value *countOrCountPtr, + bool precise, bool forFullExpr = false); void PushARCWeakReleaseCleanup(CleanupKind kind, QualType type, llvm::Value *addr, bool forFullExpr = false); + void PushARCArrayWeakReleaseCleanup(CleanupKind kind, QualType elementType, + llvm::Value *addr, + llvm::Value *countOrCountPtr, + bool forFullExpr = false); + static Destroyer destroyARCStrongImprecise; + static Destroyer destroyARCStrongPrecise; + static Destroyer destroyARCWeak; + void PushARCFieldReleaseCleanup(CleanupKind cleanupKind, const FieldDecl *Field); void PushARCFieldWeakReleaseCleanup(CleanupKind cleanupKind, diff --git a/clang/test/CodeGenCXX/destructors.cpp b/clang/test/CodeGenCXX/destructors.cpp index 94d88334f85..c6f5bedfdf7 100644 --- a/clang/test/CodeGenCXX/destructors.cpp +++ b/clang/test/CodeGenCXX/destructors.cpp @@ -233,27 +233,16 @@ namespace test4 { namespace test5 { struct A { ~A(); }; - // This is really unnecessarily verbose; we should be using phis, - // even at -O0. 
- // CHECK: define void @_ZN5test53fooEv() // CHECK: [[ELEMS:%.*]] = alloca [5 x [[A:%.*]]], align - // CHECK-NEXT: [[IVAR:%.*]] = alloca i64 - // CHECK: [[ELEMSARRAY:%.*]] = bitcast [5 x [[A]]]* [[ELEMS]] to [[A]] - // CHECK-NEXT: store i64 5, i64* [[IVAR]] - // CHECK-NEXT: br label - // CHECK: [[I:%.*]] = load i64* [[IVAR]] - // CHECK-NEXT: icmp ne i64 [[I]], 0 - // CHECK-NEXT: br i1 - // CHECK: [[I:%.*]] = load i64* [[IVAR]] - // CHECK-NEXT: [[I2:%.*]] = sub i64 [[I]], 1 - // CHECK-NEXT: getelementptr inbounds [[A]]* [[ELEMSARRAY]], i64 [[I2]] - // CHECK-NEXT: call void @_ZN5test51AD1Ev( - // CHECK-NEXT: br label - // CHECK: [[I:%.*]] = load i64* [[IVAR]] - // CHECK-NEXT: [[I1:%.*]] = sub i64 [[I]], 1 - // CHECK-NEXT: store i64 [[I1]], i64* [[IVAR]] + // CHECK-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [5 x [[A]]]* [[ELEMS]], i32 0, i32 0 + // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [[A]]* [[BEGIN]], i64 5 // CHECK-NEXT: br label + // CHECK: [[POST:%.*]] = phi [[A]]* [ [[END]], {{%.*}} ], [ [[ELT:%.*]], {{%.*}} ] + // CHECK-NEXT: [[ELT]] = getelementptr inbounds [[A]]* [[POST]], i64 -1 + // CHECK-NEXT: call void @_ZN5test51AD1Ev([[A]]* [[ELT]]) + // CHECK-NEXT: [[T0:%.*]] = icmp eq [[A]]* [[ELT]], [[BEGIN]] + // CHECK-NEXT: br i1 [[T0]], // CHECK: ret void void foo() { A elems[5]; diff --git a/clang/test/CodeGenCXX/temporaries.cpp b/clang/test/CodeGenCXX/temporaries.cpp index 348d51e019a..8aeca653da5 100644 --- a/clang/test/CodeGenCXX/temporaries.cpp +++ b/clang/test/CodeGenCXX/temporaries.cpp @@ -421,28 +421,24 @@ namespace Elision { void test4() { // CHECK: [[X:%.*]] = alloca [[A]], align 8 // CHECK-NEXT: [[XS:%.*]] = alloca [2 x [[A]]], align 16 - // CHECK-NEXT: [[I:%.*]] = alloca i64 // CHECK-NEXT: call void @_ZN7Elision1AC1Ev([[A]]* [[X]]) A x; - // CHECK-NEXT: [[XS0:%.*]] = getelementptr inbounds [2 x [[A]]]* [[XS]], i32 0, i32 0 + // CHECK-NEXT: [[XS0:%.*]] = getelementptr inbounds [2 x [[A]]]* [[XS]], i64 0, i64 0 // CHECK-NEXT: call void @_ZN7Elision1AC1Ev([[A]]* [[XS0]]) - // CHECK-NEXT: [[XS1:%.*]] = getelementptr inbounds [2 x [[A]]]* [[XS]], i32 0, i32 1 + // CHECK-NEXT: [[XS1:%.*]] = getelementptr inbounds [[A]]* [[XS0]], i64 1 // CHECK-NEXT: call void @_ZN7Elision1AC1ERKS0_([[A]]* [[XS1]], [[A]]* [[X]]) - // CHECK-NEXT: [[XSB:%.*]] = bitcast [2 x [[A]]]* [[XS]] to [[A]]* A xs[] = { A(), x }; - // CHECK-NEXT: store i64 2, i64* [[I]] - // CHECK-NEXT: br label - // CHECK: [[I0:%.*]] = load i64* [[I]] - // CHECK-NEXT: icmp ne i64 [[I0]], 0 - // CHECK-NEXT: br i1 - // CHECK: [[I1:%.*]] = load i64* [[I]] - // CHECK-NEXT: [[I2:%.*]] = sub i64 [[I1]], 1 - // CHECK-NEXT: [[XSI:%.*]] = getelementptr inbounds [[A]]* [[XSB]], i64 [[I2]] - // CHECK-NEXT: call void @_ZN7Elision1AD1Ev([[A]]* [[XSI]]) + // CHECK-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [2 x [[A]]]* [[XS]], i32 0, i32 0 + // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [[A]]* [[BEGIN]], i64 2 // CHECK-NEXT: br label + // CHECK: [[AFTER:%.*]] = phi [[A]]* + // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds [[A]]* [[AFTER]], i64 -1 + // CHECK-NEXT: call void @_ZN7Elision1AD1Ev([[A]]* [[CUR]]) + // CHECK-NEXT: [[T0:%.*]] = icmp eq [[A]]* [[CUR]], [[BEGIN]] + // CHECK-NEXT: br i1 [[T0]], // CHECK: call void @_ZN7Elision1AD1Ev([[A]]* [[X]]) } diff --git a/clang/test/CodeGenCXX/value-init.cpp b/clang/test/CodeGenCXX/value-init.cpp index a5a0b67d57d..6178c24e949 100644 --- a/clang/test/CodeGenCXX/value-init.cpp +++ b/clang/test/CodeGenCXX/value-init.cpp @@ -105,34 +105,24 @@ void f() { // CHECK: call void 
@_ZN6PR98014TestC1Ei // CHECK-NOT: call void @llvm.memset.p0i8.i64 // CHECK: call void @_ZN6PR98014TestC1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98014TestC1Ev Test partial[3] = { 1 }; // CHECK-NOT: call void @llvm.memset.p0i8.i64 // CHECK: call void @_ZN6PR98014TestC1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98014TestC1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98014TestC1Ev + // CHECK-NOT: call void @_ZN6PR98014TestC1Ev Test empty[3] = {}; // CHECK: call void @llvm.memset.p0i8.i64 // CHECK-NOT: call void @llvm.memset.p0i8.i64 // CHECK: call void @_ZN6PR98015Test2C1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98015Test2C1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98015Test2C1Ev + // CHECK-NOT: call void @_ZN6PR98015Test2C1Ev Test2 empty2[3] = {}; // CHECK: call void @llvm.memset.p0i8.i64 // CHECK-NOT: call void @llvm.memset.p0i8.i64 // CHECK: call void @_ZN6PR98015Test3C1Ev // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98015Test3C1Ev - // CHECK-NOT: call void @llvm.memset.p0i8.i64 - // CHECK: call void @_ZN6PR98015Test3C1Ev + // CHECK-NOT: call void @_ZN6PR98015Test3C1Ev Test3 empty3[3] = {}; } @@ -189,10 +179,7 @@ namespace zeroinit { X3<int>().f(); } - // CHECK: define linkonce_odr void @_ZN8zeroinit2X3IiEC2Ev(%"struct.zeroinit::X3"* %this) unnamed_addr - // CHECK: call void @llvm.memset.p0i8.i64 - // CHECK-NEXT: call void @_ZN8zeroinit2X2IiEC2Ev - // CHECK-NEXT: ret void + // More checks at EOF } namespace PR8726 { @@ -207,3 +194,58 @@ void f(const C& c) { } } + +// rdar://problem/9355931 +namespace test6 { + struct A { A(); A(int); }; + + void test() { + A arr[10][20] = { 5 }; + }; + // CHECK: define void @_ZN5test64testEv() + // CHECK: [[ARR:%.*]] = alloca [10 x [20 x [[A:%.*]]]], + // CHECK-NEXT: [[IDX:%.*]] = alloca i64 + + // CHECK-NEXT: [[INNER:%.*]] = getelementptr inbounds [10 x [20 x [[A]]]]* [[ARR]], i64 0, i64 0 + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [20 x [[A]]]* [[INNER]], i64 0, i64 0 + // CHECK-NEXT: call void @_ZN5test61AC1Ei([[A]]* [[T0]], i32 5) + // CHECK-NEXT: [[BEGIN:%.*]] = getelementptr inbounds [[A]]* [[T0]], i64 1 + // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [[A]]* [[T0]], i64 20 + // CHECK-NEXT: br label + // CHECK: [[CUR:%.*]] = phi [[A]]* [ [[BEGIN]], {{%.*}} ], [ [[NEXT:%.*]], {{%.*}} ] + // CHECK-NEXT: call void @_ZN5test61AC1Ev([[A]]* [[CUR]]) + // CHECK-NEXT: [[NEXT]] = getelementptr inbounds [[A]]* [[CUR]], i64 1 + // CHECK-NEXT: [[T0:%.*]] = icmp eq [[A]]* [[NEXT]], [[END]] + // CHECK-NEXT: br i1 + + // CHECK: [[BEGIN:%.*]] = getelementptr inbounds [20 x [[A]]]* [[INNER]], i64 1 + // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds [20 x [[A]]]* [[INNER]], i64 10 + // CHECK-NEXT: br label + // CHECK: [[CUR:%.*]] = phi [20 x [[A]]]* [ [[BEGIN]], {{%.*}} ], [ [[NEXT:%.*]], {{%.*}} ] + // CHECK-NEXT: [[FIRST:%.*]] = bitcast [20 x [[A]]]* [[CUR]] to [[A]]* + + // TODO: this loop should use phis, too, and for preference would be + // merged with the outer loop. 
+ // CHECK-NEXT: store i64 0, i64* [[IDX]] + // CHECK-NEXT: br label + // CHECK: [[T0:%.*]] = load i64* [[IDX]] + // CHECK-NEXT: [[T1:%.*]] = icmp ult i64 [[T0]], 20 + // CHECK-NEXT: br i1 [[T1]] + // CHECK: [[T0:%.*]] = load i64* [[IDX]] + // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[A]]* [[FIRST]], i64 [[T0]] + // CHECK-NEXT: call void @_ZN5test61AC1Ev([[A]]* [[T1]]) + // CHECK-NEXT: br label + // CHECK: [[T0:%.*]] = load i64* [[IDX]] + // CHECK-NEXT: [[T1:%.*]] = add i64 [[T0]], 1 + // CHECK-NEXT: store i64 [[T1]], i64* [[IDX]] + // CHECK-NEXT: br label + // CHECK: [[NEXT]] = getelementptr inbounds [20 x [[A]]]* [[CUR]], i64 1 + // CHECK-NEXT: [[T0:%.*]] = icmp eq [20 x [[A]]]* [[NEXT]], [[END]] + // CHECK-NEXT: br i1 [[T0]] + // CHECK: ret void +} + +// CHECK: define linkonce_odr void @_ZN8zeroinit2X3IiEC2Ev(%"struct.zeroinit::X3"* %this) unnamed_addr +// CHECK: call void @llvm.memset.p0i8.i64 +// CHECK-NEXT: call void @_ZN8zeroinit2X2IiEC2Ev +// CHECK-NEXT: ret void diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m index 062773d34fb..479f0d23127 100644 --- a/clang/test/CodeGenObjC/arc.m +++ b/clang/test/CodeGenObjC/arc.m @@ -475,14 +475,12 @@ void test19() { // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[BEGIN]], i64 5 // CHECK-NEXT: br label - // CHECK: [[CUR:%.*]] = phi i8** - // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[END]] - // CHECK-NEXT: br i1 [[EQ]], - - // CHECK: [[T0:%.*]] = load i8** [[CUR]] + // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[NEXT:%.*]], {{%.*}} ] + // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8** [[AFTER]], i64 -1 + // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) nounwind, !clang.imprecise_release - // CHECK-NEXT: [[NEXT:%.*]] = getelementptr inbounds i8** [[CUR]], i32 1 - // CHECK-NEXT: br label + // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[BEGIN]] + // CHECK-NEXT: br i1 [[EQ]], // CHECK: ret void } @@ -515,14 +513,12 @@ void test20(unsigned n) { // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[VLA]], i64 [[DIM]] // CHECK-NEXT: br label - // CHECK: [[CUR:%.*]] = phi i8** - // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[END]] - // CHECK-NEXT: br i1 [[EQ]], - - // CHECK: [[T0:%.*]] = load i8** [[CUR]] + // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], {{%.*}} ] + // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8** [[AFTER]], i64 -1 + // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) nounwind, !clang.imprecise_release - // CHECK-NEXT: [[NEXT:%.*]] = getelementptr inbounds i8** [[CUR]], i32 1 - // CHECK-NEXT: br label + // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[VLA]] + // CHECK-NEXT: br i1 [[EQ]], // CHECK: [[T0:%.*]] = load i8** [[SAVED_STACK]] // CHECK-NEXT: call void @llvm.stackrestore(i8* [[T0]]) @@ -562,14 +558,12 @@ void test21(unsigned n) { // CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i8** [[BEGIN]], i64 [[T1]] // CHECK-NEXT: br label - // CHECK: [[CUR:%.*]] = phi i8** - // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[END]] - // CHECK-NEXT: br i1 [[EQ]], - - // CHECK: [[T0:%.*]] = load i8** [[CUR]] + // CHECK: [[AFTER:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], {{%.*}} ] + // CHECK-NEXT: [[CUR:%.*]] = getelementptr inbounds i8** [[AFTER]], i64 -1 + // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] // CHECK-NEXT: call void @objc_release(i8* [[T0]]) nounwind, !clang.imprecise_release - // CHECK-NEXT: [[NEXT:%.*]] = 
getelementptr inbounds i8** [[CUR]], i32 1 - // CHECK-NEXT: br label + // CHECK-NEXT: [[EQ:%.*]] = icmp eq i8** [[CUR]], [[BEGIN]] + // CHECK-NEXT: br i1 [[EQ]], // CHECK: [[T0:%.*]] = load i8** [[SAVED_STACK]] // CHECK-NEXT: call void @llvm.stackrestore(i8* [[T0]]) @@ -1120,7 +1114,7 @@ void test36(id x) { // CHECK: br label // CHECK: call void @objc_release - // CHECK: br label + // CHECK: br i1 // CHECK: call void @objc_release // CHECK-NEXT: ret void |