Diffstat (limited to 'clang/lib/CodeGen/CGExprAgg.cpp')
-rw-r--r--   clang/lib/CodeGen/CGExprAgg.cpp   21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 6c505210b95..760e6f15fa7 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -389,9 +389,9 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
   // already-constructed members if an initializer throws.
   // For that, we'll need an EH cleanup.
   QualType::DestructionKind dtorKind = elementType.isDestructedType();
-  llvm::AllocaInst *endOfInit = 0;
+  llvm::AllocaInst *endOfInit = nullptr;
   EHScopeStack::stable_iterator cleanup;
-  llvm::Instruction *cleanupDominator = 0;
+  llvm::Instruction *cleanupDominator = nullptr;
   if (CGF.needsEHCleanup(dtorKind)) {
     // In principle we could tell the cleanup where we are more
     // directly, but the control flow can get so varied here that it
@@ -539,7 +539,7 @@ static Expr *findPeephole(Expr *op, CastKind kind) {
       if (castE->getCastKind() == CK_NoOp)
         continue;
     }
-    return 0;
+    return nullptr;
   }
 }
 
@@ -1044,7 +1044,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
     return;
   case TEK_Scalar:
     if (LV.isSimple()) {
-      CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
+      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
     } else {
       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
     }
@@ -1165,7 +1165,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
   // We'll need to enter cleanup scopes in case any of the member
   // initializers throw an exception.
   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
-  llvm::Instruction *cleanupDominator = 0;
+  llvm::Instruction *cleanupDominator = nullptr;
 
   // Here we iterate over the fields; this makes it simpler to both
   // default-initialize fields and skip over unnamed fields.
@@ -1252,7 +1252,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
   // If this is an initlist expr, sum up the size of sizes of the (present)
   // elements.  If this is something weird, assume the whole thing is non-zero.
   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
-  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
     return CGF.getContext().getTypeSizeInChars(E->getType());
 
   // InitListExprs for structs have to be handled carefully.  If there are
@@ -1301,7 +1301,8 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                      CodeGenFunction &CGF) {
   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
   // volatile stores.
-  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
+  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
+    return;
 
   // C++ objects with a user-declared constructor don't need zero'ing.
   if (CGF.getLangOpts().CPlusPlus)
@@ -1348,7 +1349,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   assert(E && hasAggregateEvaluationKind(E->getType()) &&
          "Invalid aggregate expression to emit");
-  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
+  assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
          "slot has bits but no address");
 
   // Optimize the slot if possible.
@@ -1468,10 +1469,10 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   // memcpy, as well as the TBAA tags for the members of the struct, in case
   // the optimizer wishes to expand it in to scalar memory operations.
   llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
-  
+
   Builder.CreateMemCpy(DestPtr, SrcPtr,
                        llvm::ConstantInt::get(IntPtrTy,
                                               TypeInfo.first.getQuantity()),
                        alignment.getQuantity(), isVolatile,
-                       /*TBAATag=*/0, TBAAStructTag);
+                       /*TBAATag=*/nullptr, TBAAStructTag);
 }
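The patch is purely mechanical: every pointer-typed use of the literal 0 becomes nullptr, with no behavioral change. As a rough illustration of why this C++11 cleanup is worthwhile (a standalone sketch, not code from the patch; the take() overloads are hypothetical), nullptr keeps overload resolution honest where a literal 0 would silently pick the integer overload:

// Standalone illustration (hypothetical helpers, not from CGExprAgg.cpp):
// the literal 0 deduces to int by preference, while nullptr only converts
// to pointer types, so pointer intent is stated unambiguously.
#include <cstdio>

static void take(int)   { std::puts("int overload"); }
static void take(int *) { std::puts("pointer overload"); }

int main() {
  int *endOfInit = nullptr;   // same pattern the patch applies to locals
  take(0);                    // prints "int overload": 0 is an int first
  take(nullptr);              // prints "pointer overload"
  take(endOfInit);            // prints "pointer overload"
  return 0;
}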

