author    Craig Topper <craig.topper@gmail.com>  2014-05-21 05:09:00 +0000
committer Craig Topper <craig.topper@gmail.com>  2014-05-21 05:09:00 +0000
commit    8a13c4180e1a50fd4458ad522e2c271ce8ceb3d6 (patch)
tree      be98b83ca922b4a9f79937f9166969913f2edcb8 /clang/lib/CodeGen/CGBuiltin.cpp
parent    5c35c8c9abc2a830c912c70d51c72197917470c7 (diff)
[C++11] Use 'nullptr'. CodeGen edition.
llvm-svn: 209272
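
The change is mechanical: each null-pointer literal 0 (and NULL) in CGBuiltin.cpp becomes nullptr. As a minimal illustrative sketch of the motivation, not part of this commit: the literal 0 is an integer first, so it can steer overload resolution toward an integer overload, whereas nullptr has its own type (std::nullptr_t) and converts only to pointer types.

#include <cstdio>

// Two overloads that a null-pointer literal could plausibly reach.
static void emit(int v)   { std::printf("int overload: %d\n", v); }
static void emit(void *p) { std::printf("pointer overload: %p\n", p); }

int main() {
  emit(0);        // binds to emit(int): 0 is an integer literal first
  emit(nullptr);  // binds to emit(void*): nullptr converts only to pointers
  return 0;
}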
Diffstat (limited to 'clang/lib/CodeGen/CGBuiltin.cpp')
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 121
1 file changed, 60 insertions(+), 61 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index f77aeecd292..9d692d8e1e2 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -212,7 +212,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
default: break; // Handle intrinsics and libm functions below.
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
- return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
@@ -457,7 +457,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("unreachable.cont"));
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__builtin_powi:
@@ -790,7 +790,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("builtin_eh_return.cont"));
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
@@ -856,7 +856,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("longjmp.cont"));
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
@@ -1037,7 +1037,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__sync_synchronize: {
@@ -1049,7 +1049,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
Builder.CreateFence(llvm::SequentiallyConsistent);
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__c11_atomic_is_lock_free:
@@ -1090,7 +1090,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- AtomicRMWInst *Result = 0;
+ AtomicRMWInst *Result = nullptr;
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
@@ -1190,7 +1190,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Store->setOrdering(llvm::SequentiallyConsistent);
break;
}
- return RValue::get(0);
+ return RValue::get(nullptr);
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
@@ -1220,7 +1220,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
SI->addCase(Builder.getInt32(5), BBs[2]);
Builder.SetInsertPoint(ContBB);
- return RValue::get(0);
+ return RValue::get(nullptr);
}
case Builtin::BI__atomic_thread_fence:
@@ -1254,7 +1254,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
break;
}
- return RValue::get(0);
+ return RValue::get(nullptr);
}
llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
@@ -1289,7 +1289,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
SI->addCase(Builder.getInt32(5), SeqCstBB);
Builder.SetInsertPoint(ContBB);
- return RValue::get(0);
+ return RValue::get(nullptr);
}
// Library functions with special handling.
@@ -1509,7 +1509,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getAddress());
case Builtin::BI__noop:
- return RValue::get(0);
+ return RValue::get(nullptr);
case Builtin::BI_InterlockedCompareExchange: {
AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
EmitScalarExpr(E->getArg(0)),
@@ -1656,7 +1656,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc64le:
return EmitPPCBuiltinExpr(BuiltinID, E);
default:
- return 0;
+ return nullptr;
}
}
@@ -2764,7 +2764,7 @@ findNeonIntrinsicInMap(llvm::ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
- return 0;
+ return nullptr;
}
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
@@ -3040,7 +3040,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
- return 0;
+ return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(NeonTypeConst.getZExtValue());
@@ -3050,7 +3050,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
+ return nullptr;
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
@@ -3397,7 +3397,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -3426,7 +3426,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -3445,7 +3445,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -3538,11 +3538,11 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) {
unsigned int Int = 0;
- const char *s = NULL;
+ const char *s = nullptr;
switch (BuiltinID) {
default:
- return 0;
+ return nullptr;
case NEON::BI__builtin_neon_vtbl1_v:
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
@@ -3576,14 +3576,14 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
- return 0;
+ return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
llvm::VectorType *VTy = GetNeonType(&CGF, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
+ return nullptr;
SmallVector<Value *, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
@@ -3598,20 +3598,20 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
TblOps.push_back(Ops[0]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
Intrinsic::aarch64_neon_vtbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
Intrinsic::aarch64_neon_vtbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
Intrinsic::aarch64_neon_vtbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
@@ -3619,13 +3619,13 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::aarch64_neon_vtbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
TblOps.push_back(Ops[1]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
- Intrinsic::aarch64_neon_vtbl1, "vtbl1");
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
+ Intrinsic::aarch64_neon_vtbl1, "vtbl1");
llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
@@ -3649,7 +3649,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::aarch64_neon_vtbl2, "vtbl2");
llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
@@ -3701,7 +3701,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
}
if (!Int)
- return 0;
+ return nullptr;
Function *F = CGF.CGM.getIntrinsic(Int, Ty);
return CGF.EmitNeonCall(F, Ops, s);
@@ -3740,7 +3740,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
SmallVector<Value *, 4> Ops;
- llvm::Value *Align = 0; // Alignment for load/store
+ llvm::Value *Align = nullptr; // Alignment for load/store
if (BuiltinID == NEON::BI__builtin_neon_vldrq_p128) {
Value *Op = EmitScalarExpr(E->getArg(0));
@@ -3847,7 +3847,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
- return 0;
+ return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
@@ -3857,8 +3857,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
-
+ return nullptr;
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
@@ -3873,7 +3872,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
default:
- return 0;
+ return nullptr;
// AArch64 builtins mapping to legacy ARM v7 builtins.
// FIXME: the mapped builtins listed correspond to what has been tested
@@ -4492,7 +4491,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
SmallVector<Value*, 4> Ops;
- llvm::Value *Align = 0;
+ llvm::Value *Align = nullptr;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
if (i == 0) {
switch (BuiltinID) {
@@ -4608,7 +4607,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
- return 0;
+ return nullptr;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
@@ -4636,7 +4635,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
+ return nullptr;
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
@@ -4650,7 +4649,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int;
switch (BuiltinID) {
- default: return 0;
+ default: return nullptr;
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.
@@ -4842,11 +4841,11 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops) {
unsigned int Int = 0;
- const char *s = NULL;
+ const char *s = nullptr;
switch (BuiltinID) {
default:
- return 0;
+ return nullptr;
case NEON::BI__builtin_neon_vtbl1_v:
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
@@ -4881,14 +4880,14 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
- return 0;
+ return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
llvm::VectorType *VTy = GetNeonType(&CGF, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
+ return nullptr;
unsigned nElts = VTy->getNumElements();
@@ -4900,20 +4899,20 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
TblOps.push_back(Ops[0]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
Intrinsic::arm64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
Intrinsic::arm64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
Intrinsic::arm64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
@@ -4921,13 +4920,13 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
- return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::arm64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
TblOps.push_back(Ops[1]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
- Intrinsic::arm64_neon_tbl1, "vtbl1");
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
+ Intrinsic::arm64_neon_tbl1, "vtbl1");
llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
@@ -4948,7 +4947,7 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
- Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::arm64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
@@ -4997,7 +4996,7 @@ static Value *EmitARM64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
}
if (!Int)
- return 0;
+ return nullptr;
Function *F = CGF.CGM.getIntrinsic(Int, Ty);
return CGF.EmitNeonCall(F, Ops, s);
@@ -5678,7 +5677,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
- return 0;
+ return nullptr;
// Not all intrinsics handled by the common case work for ARM64 yet, so only
// defer to common code if it's been added to our special map.
@@ -5688,14 +5687,14 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
- Builtin->NameHint, Builtin->TypeModifier, E, Ops, 0);
+ Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
if (Value *V = EmitARM64TblBuiltinExpr(*this, BuiltinID, E, Ops))
return V;
unsigned Int;
switch (BuiltinID) {
- default: return 0;
+ default: return nullptr;
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
@@ -6725,7 +6724,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -6745,7 +6744,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -6764,7 +6763,7 @@ Value *CodeGenFunction::EmitARM64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
- Value *SV = 0;
+ Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
@@ -6876,7 +6875,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
switch (BuiltinID) {
- default: return 0;
+ default: return nullptr;
case X86::BI_mm_prefetch: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = ConstantInt::get(Int32Ty, 0);
@@ -7053,7 +7052,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
- const char *name = 0;
+ const char *name = nullptr;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch(BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
@@ -7122,7 +7121,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: return 0;
+ default: return nullptr;
// vec_ld, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx: