Diffstat (limited to 'clang/lib/CodeGen/CGDecl.cpp')
-rw-r--r-- | clang/lib/CodeGen/CGDecl.cpp | 90
1 file changed, 6 insertions, 84 deletions
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 6f27d879a99..aea99dbbafd 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -19,6 +19,7 @@
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
 #include "ConstantEmitter.h"
+#include "PatternInit.h"
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
 #include "clang/AST/CharUnits.h"
@@ -984,92 +985,13 @@ static bool shouldSplitConstantStore(CodeGenModule &CGM,
   return false;
 }
 
-static llvm::Constant *patternFor(CodeGenModule &CGM, llvm::Type *Ty) {
-  // The following value is a guaranteed unmappable pointer value and has a
-  // repeated byte-pattern which makes it easier to synthesize. We use it for
-  // pointers as well as integers so that aggregates are likely to be
-  // initialized with this repeated value.
-  constexpr uint64_t LargeValue = 0xAAAAAAAAAAAAAAAAull;
-  // For 32-bit platforms it's a bit trickier because, across systems, only the
-  // zero page can reasonably be expected to be unmapped, and even then we need
-  // a very low address. We use a smaller value, and that value sadly doesn't
-  // have a repeated byte-pattern. We don't use it for integers.
-  constexpr uint32_t SmallValue = 0x000000AA;
-  // Floating-point values are initialized as NaNs because they propagate. Using
-  // a repeated byte pattern means that it will be easier to initialize
-  // all-floating-point aggregates and arrays with memset. Further, aggregates
-  // which mix integral and a few floats might also initialize with memset
-  // followed by a handful of stores for the floats. Using fairly unique NaNs
-  // also means they'll be easier to distinguish in a crash.
-  constexpr bool NegativeNaN = true;
-  constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
-  if (Ty->isIntOrIntVectorTy()) {
-    unsigned BitWidth = cast<llvm::IntegerType>(
-                            Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
-                            ->getBitWidth();
-    if (BitWidth <= 64)
-      return llvm::ConstantInt::get(Ty, LargeValue);
-    return llvm::ConstantInt::get(
-        Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, LargeValue)));
-  }
-  if (Ty->isPtrOrPtrVectorTy()) {
-    auto *PtrTy = cast<llvm::PointerType>(
-        Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
-    unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
-        PtrTy->getAddressSpace());
-    llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
-    uint64_t IntValue;
-    switch (PtrWidth) {
-    default:
-      llvm_unreachable("pattern initialization of unsupported pointer width");
-    case 64:
-      IntValue = LargeValue;
-      break;
-    case 32:
-      IntValue = SmallValue;
-      break;
-    }
-    auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
-    return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
-  }
-  if (Ty->isFPOrFPVectorTy()) {
-    unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
-        (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
-            ->getFltSemantics());
-    llvm::APInt Payload(64, NaNPayload);
-    if (BitWidth >= 64)
-      Payload = llvm::APInt::getSplat(BitWidth, Payload);
-    return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
-  }
-  if (Ty->isArrayTy()) {
-    // Note: this doesn't touch tail padding (at the end of an object, before
-    // the next array object). It is instead handled by replaceUndef.
-    auto *ArrTy = cast<llvm::ArrayType>(Ty);
-    llvm::SmallVector<llvm::Constant *, 8> Element(
-        ArrTy->getNumElements(), patternFor(CGM, ArrTy->getElementType()));
-    return llvm::ConstantArray::get(ArrTy, Element);
-  }
-
-  // Note: this doesn't touch struct padding. It will initialize as much union
-  // padding as is required for the largest type in the union. Padding is
-  // instead handled by replaceUndef. Stores to structs with volatile members
-  // don't have a volatile qualifier when initialized according to C++. This is
-  // fine because stack-based volatiles don't really have volatile semantics
-  // anyways, and the initialization shouldn't be observable.
-  auto *StructTy = cast<llvm::StructType>(Ty);
-  llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
-  for (unsigned El = 0; El != Struct.size(); ++El)
-    Struct[El] = patternFor(CGM, StructTy->getElementType(El));
-  return llvm::ConstantStruct::get(StructTy, Struct);
-}
-
 enum class IsPattern { No, Yes };
 
 /// Generate a constant filled with either a pattern or zeroes.
 static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM,
                                         IsPattern isPattern, llvm::Type *Ty) {
   if (isPattern == IsPattern::Yes)
-    return patternFor(CGM, Ty);
+    return initializationPatternFor(CGM, Ty);
   else
     return llvm::Constant::getNullValue(Ty);
 }
@@ -1294,8 +1216,8 @@ static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                      Address Loc, bool isVolatile,
                                      CGBuilderTy &Builder) {
   llvm::Type *ElTy = Loc.getElementType();
-  llvm::Constant *constant =
-      constWithPadding(CGM, IsPattern::Yes, patternFor(CGM, ElTy));
+  llvm::Constant *constant = constWithPadding(
+      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
   assert(!isa<llvm::UndefValue>(constant));
   emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
 }
@@ -1818,8 +1740,8 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
 
   case LangOptions::TrivialAutoVarInitKind::Pattern: {
     llvm::Type *ElTy = Loc.getElementType();
-    llvm::Constant *Constant =
-        constWithPadding(CGM, IsPattern::Yes, patternFor(CGM, ElTy));
+    llvm::Constant *Constant = constWithPadding(
+        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
     CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
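
The new "PatternInit.h" pulled in by the first hunk is not part of this diff (the diffstat is limited to CGDecl.cpp), so its contents are an assumption here. Judging from the call sites above, the interface it presumably exposes is roughly the following sketch; the real header may differ in guard names, comments, and layout.

// Sketch only: a plausible PatternInit.h matching the calls
// initializationPatternFor(CGM, Ty) introduced by this patch.
#ifndef LLVM_CLANG_LIB_CODEGEN_PATTERNINIT_H
#define LLVM_CLANG_LIB_CODEGEN_PATTERNINIT_H

namespace llvm {
class Constant;
class Type;
} // namespace llvm

namespace clang {
namespace CodeGen {

class CodeGenModule;

/// Build the constant used by pattern initialization of automatic variables
/// for \p Ty, i.e. the value previously produced by the removed static
/// patternFor() helper.
llvm::Constant *initializationPatternFor(CodeGenModule &CGM, llvm::Type *Ty);

} // namespace CodeGen
} // namespace clang

#endif // LLVM_CLANG_LIB_CODEGEN_PATTERNINIT_H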
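
The comments in the removed function still describe the values the helper produces: a repeated 0xAA byte fill for integers and 64-bit pointers (a guaranteed-unmappable address), 0x000000AA for 32-bit pointers, and negative quiet NaNs with an all-ones payload for floating point. A minimal way to observe this from source, assuming a 64-bit target and the -ftrivial-auto-var-init=pattern mode (handled as LangOptions::TrivialAutoVarInitKind::Pattern in the last hunk), is the sketch below; the exact bit patterns are an implementation detail and may change between releases.

// pattern_demo.cpp (hypothetical file name). Build with:
//   clang++ -O0 -ftrivial-auto-var-init=pattern pattern_demo.cpp
// Reading uninitialized locals is normally undefined behavior; under pattern
// init the locals are pre-filled, so the printed bytes should show the
// repeated 0xAA fill and the all-ones NaN described in the comments above.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t i; // expected fill on 64-bit targets: 0xAAAAAAAAAAAAAAAA
  void *p;    // pointers use the same unmappable 0xAA pattern on 64-bit
  double d;   // expected fill: negative quiet NaN with an all-ones payload

  uint64_t ibits, pbits, dbits;
  std::memcpy(&ibits, &i, sizeof ibits); // inspect raw bytes without UB casts
  std::memcpy(&pbits, &p, sizeof pbits);
  std::memcpy(&dbits, &d, sizeof dbits);
  std::printf("int:    0x%016llx\n", (unsigned long long)ibits);
  std::printf("ptr:    0x%016llx\n", (unsigned long long)pbits);
  std::printf("double: 0x%016llx\n", (unsigned long long)dbits);
  return 0;
}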