author    Mehdi Amini <mehdi.amini@apple.com>  2015-03-10 02:37:25 +0000
committer Mehdi Amini <mehdi.amini@apple.com>  2015-03-10 02:37:25 +0000
commit    a28d91d81b5daa4d8b92452ea8203a57023b576f (patch)
tree      cbefa13abba5df48124e2f93e7d7d5b13562ad72 /llvm/lib/Transforms/Instrumentation
parent    b3d5209927dc4f61c5eaaa48ceac48b8adf6d524 (diff)
DataLayout is mandatory, update the API to reflect it with references.
Summary:
Now that the DataLayout is a mandatory part of the module, let's start cleaning up the codebase. This patch is a first attempt at doing that.

This patch is not exactly NFC: for instance, some places were passing a nullptr instead of the DataLayout, possibly just because the DataLayout argument had a default value in many functions of the API. Even though it is not purely NFC, there is no change in the validation.

I turned as many pointers to DataLayout into references as possible; this helped in figuring out all the places where a nullptr could come up. I initially had a local version of this patch broken into over 30 independent commits, but some of the later commits cleaned up the API and touched code already modified by earlier ones, so it seemed cleaner to land it without the intermediate states.

Test Plan:

Reviewers: echristo

Subscribers: llvm-commits

From: Mehdi Amini <mehdi.amini@apple.com>

llvm-svn: 231740
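The heart of the change is visible throughout the hunks below: APIs that took a nullable `const DataLayout *` (often defaulted to nullptr) now take a `const DataLayout &` obtained from the enclosing Module. A minimal before/after sketch of that migration, using a hypothetical helper rather than code from this patch:

    // Before: the layout is optional, so a forgotten argument silently
    // becomes nullptr and hides missing-layout bugs at the call site.
    uint64_t getStoreSize(Value *V, const DataLayout *DL = nullptr) {
      return DL ? DL->getTypeStoreSize(V->getType()) : 0;
    }

    // After: the layout is mandatory; take it by reference and let
    // callers fetch it from the Module that owns the IR.
    uint64_t getStoreSize(Value *V, const DataLayout &DL) {
      return DL.getTypeStoreSize(V->getType());
    }

    // Typical call site inside a function pass:
    //   const DataLayout &DL = F.getParent()->getDataLayout();
    //   uint64_t Size = getStoreSize(V, DL);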
Diffstat (limited to 'llvm/lib/Transforms/Instrumentation')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  | 52
-rw-r--r--  llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp    | 23
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 18
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp   | 54
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp   | 55
5 files changed, 109 insertions(+), 93 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 6dc621a8806..e2d7a6de4e1 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -396,7 +396,8 @@ struct AddressSanitizer : public FunctionPass {
}
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
Type *Ty = AI->getAllocatedType();
- uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes =
+ AI->getModule()->getDataLayout().getTypeAllocSize(Ty);
return SizeInBytes;
}
/// Check if we want (and can) handle this alloca.
@@ -407,7 +408,7 @@ struct AddressSanitizer : public FunctionPass {
uint64_t *TypeSize,
unsigned *Alignment) const;
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
- bool UseCalls);
+ bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
@@ -435,7 +436,6 @@ struct AddressSanitizer : public FunctionPass {
uint64_t TypeSize) const;
LLVMContext *C;
- const DataLayout *DL;
Triple TargetTriple;
int LongSize;
Type *IntptrTy;
@@ -478,7 +478,6 @@ class AddressSanitizerModule : public ModulePass {
GlobalsMetadata GlobalsMD;
Type *IntptrTy;
LLVMContext *C;
- const DataLayout *DL;
Triple TargetTriple;
ShadowMapping Mapping;
Function *AsanPoisonGlobals;
@@ -605,8 +604,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// Right shift for BigEndian and left shift for LittleEndian.
Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
- return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
- : IRB.CreateLShr(Val, Shift);
+ auto &DL = F.getParent()->getDataLayout();
+ return DL.isLittleEndian() ? IRB.CreateShl(Val, Shift)
+ : IRB.CreateLShr(Val, Shift);
}
// Compute PartialRzMagic for dynamic alloca call. Since we don't know the
@@ -818,29 +818,29 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
if (I->getMetadata("nosanitize")) return nullptr;
Value *PtrOperand = nullptr;
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
- *TypeSize = DL->getTypeStoreSizeInBits(LI->getType());
+ *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
*Alignment = LI->getAlignment();
PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
- *TypeSize = DL->getTypeStoreSizeInBits(SI->getValueOperand()->getType());
+ *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*Alignment = SI->getAlignment();
PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
- *TypeSize = DL->getTypeStoreSizeInBits(RMW->getValOperand()->getType());
+ *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*Alignment = 0;
PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
- *TypeSize =
- DL->getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
+ *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*Alignment = 0;
PtrOperand = XCHG->getPointerOperand();
}
@@ -896,7 +896,8 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- Instruction *I, bool UseCalls) {
+ Instruction *I, bool UseCalls,
+ const DataLayout &DL) {
bool IsWrite = false;
unsigned Alignment = 0;
uint64_t TypeSize = 0;
@@ -906,8 +907,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- GlobalVariable *G =
- dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, nullptr));
+ GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
@@ -917,7 +917,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
+ if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
@@ -1221,6 +1221,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
GlobalVariable *ModuleName = createPrivateGlobalForString(
M, M.getModuleIdentifier(), /*AllowMerging*/ false);
+ auto &DL = M.getDataLayout();
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
@@ -1234,7 +1235,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
- uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone
// and trying to make RZ to be ~ 1/4 of SizeInBytes.
@@ -1320,9 +1321,8 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
}
bool AddressSanitizerModule::runOnModule(Module &M) {
- DL = &M.getDataLayout();
C = &(M.getContext());
- int LongSize = DL->getPointerSizeInBits();
+ int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize);
@@ -1396,12 +1396,11 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- DL = &M.getDataLayout();
GlobalsMD.init(M);
C = &(M.getContext());
- LongSize = DL->getPointerSizeInBits();
+ LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
@@ -1507,6 +1506,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ const DataLayout &DL = F.getParent()->getDataLayout();
ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
/*RoundToAlign=*/true);
@@ -1516,7 +1516,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
- instrumentMop(ObjSizeVis, Inst, UseCalls);
+ instrumentMop(ObjSizeVis, Inst, UseCalls,
+ F.getParent()->getDataLayout());
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
@@ -1588,7 +1589,7 @@ void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
uint64_t Val = 0;
for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
- if (ASan.DL->isLittleEndian())
+ if (F.getParent()->getDataLayout().isLittleEndian())
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
else
Val = (Val << 8) | ShadowBytes[i + j];
@@ -1932,14 +1933,14 @@ Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
unsigned Val1Int = kAsanAllocaPartialVal1;
unsigned Val2Int = kAsanAllocaPartialVal2;
- if (!ASan.DL->isLittleEndian()) {
+ if (!F.getParent()->getDataLayout().isLittleEndian()) {
Val1Int = sys::getSwappedBytes(Val1Int);
Val2Int = sys::getSwappedBytes(Val2Int);
}
Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
// For BigEndian get 0x000000YZ -> 0xYZ000000.
- if (ASan.DL->isBigEndian())
+ if (F.getParent()->getDataLayout().isBigEndian())
PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
Value *Val2 = IRB.getInt32(Val2Int);
Value *Cond =
@@ -1973,7 +1974,8 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(
// redzones, and OldSize is number of allocated blocks with
// ElementSize size, get allocated memory size in bytes by
// OldSize * ElementSize.
- unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
+ unsigned ElementSize =
+ F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
Value *OldSize = IRB.CreateMul(AI->getArraySize(),
ConstantInt::get(IntptrTy, ElementSize));
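The AddressSanitizer hunks above show a second recurring pattern: the cached `const DataLayout *DL` member is deleted, and the layout is re-derived on demand from whatever IR entity is at hand (the diff itself uses `AI->getModule()->getDataLayout()`). A rough sketch of that idiom, with an illustrative struct name not taken from the patch:

    struct ExampleSanitizer {
      // Old style: a DL member set in doInitialization(), nullable by
      // accident. New style: no member at all; ask the IR instead.
      uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
        const DataLayout &DL = AI->getModule()->getDataLayout();
        return DL.getTypeAllocSize(AI->getAllocatedType());
      }
    };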
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 241e172bb69..978ef38b518 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -53,7 +53,6 @@ namespace {
}
private:
- const DataLayout *DL;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
@@ -62,7 +61,7 @@ namespace {
BasicBlock *getTrapBB();
void emitBranchToTrap(Value *Cmp = nullptr);
- bool instrument(Value *Ptr, Value *Val);
+ bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
}
@@ -124,8 +123,9 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
/// result from the load or the value being stored. It is used to determine the
/// size of memory block that is touched.
/// Returns true if any change was made to the IR, false otherwise.
-bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
- uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
+bool BoundsChecking::instrument(Value *Ptr, Value *InstVal,
+ const DataLayout &DL) {
+ uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType());
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@@ -140,7 +140,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- Type *IntTy = DL->getIntPtrType(Ptr->getType());
+ Type *IntTy = DL.getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
@@ -164,7 +164,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- DL = &F.getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TrapBB = nullptr;
@@ -191,13 +191,16 @@ bool BoundsChecking::runOnFunction(Function &F) {
Builder->SetInsertPoint(Inst);
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- MadeChange |= instrument(LI->getPointerOperand(), LI);
+ MadeChange |= instrument(LI->getPointerOperand(), LI, DL);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- MadeChange |= instrument(SI->getPointerOperand(), SI->getValueOperand());
+ MadeChange |=
+ instrument(SI->getPointerOperand(), SI->getValueOperand(), DL);
} else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
- MadeChange |= instrument(AI->getPointerOperand(),AI->getCompareOperand());
+ MadeChange |=
+ instrument(AI->getPointerOperand(), AI->getCompareOperand(), DL);
} else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) {
- MadeChange |= instrument(AI->getPointerOperand(), AI->getValOperand());
+ MadeChange |=
+ instrument(AI->getPointerOperand(), AI->getValOperand(), DL);
} else {
llvm_unreachable("unknown Instruction type");
}
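BoundsChecking demonstrates a third pattern: helpers that used the member now take `const DataLayout &` as an explicit parameter, threaded down from runOnFunction(), where it is fetched exactly once. A sketch under the same assumptions (hypothetical names, pass boilerplate omitted):

    bool instrumentAccess(Value *Ptr, Value *Val, const DataLayout &DL) {
      // The store size of the accessed value decides how many bytes to check.
      uint64_t NeededSize = DL.getTypeStoreSize(Val->getType());
      // ... emit the actual bounds check for Ptr here ...
      return NeededSize != 0;
    }

    bool runOnFunctionBody(Function &F) {
      const DataLayout &DL = F.getParent()->getDataLayout();
      bool MadeChange = false;
      for (BasicBlock &BB : F)
        for (Instruction &I : BB)
          if (auto *LI = dyn_cast<LoadInst>(&I))
            MadeChange |= instrumentAccess(LI->getPointerOperand(), LI, DL);
      return MadeChange;
    }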
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 65da9d9cdd9..b3925ee152f 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -217,7 +217,6 @@ class DataFlowSanitizer : public ModulePass {
WK_Custom
};
- const DataLayout *DL;
Module *Mod;
LLVMContext *Ctx;
IntegerType *ShadowTy;
@@ -422,13 +421,13 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
- DL = &M.getDataLayout();
+ const DataLayout &DL = M.getDataLayout();
Mod = &M;
Ctx = &M.getContext();
ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
ShadowPtrTy = PointerType::getUnqual(ShadowTy);
- IntptrTy = DL->getIntPtrType(*Ctx);
+ IntptrTy = DL.getIntPtrType(*Ctx);
ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
if (IsX86_64)
@@ -1050,7 +1049,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
SmallVector<Value *, 2> Objs;
- GetUnderlyingObjects(Addr, Objs, DFS.DL);
+ GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end();
i != e; ++i) {
@@ -1151,7 +1150,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
- uint64_t Size = DFSF.DFS.DL->getTypeStoreSize(LI.getType());
+ auto &DL = LI.getModule()->getDataLayout();
+ uint64_t Size = DL.getTypeStoreSize(LI.getType());
if (Size == 0) {
DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
return;
@@ -1161,7 +1161,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
if (ClPreserveAlignment) {
Align = LI.getAlignment();
if (Align == 0)
- Align = DFSF.DFS.DL->getABITypeAlignment(LI.getType());
+ Align = DL.getABITypeAlignment(LI.getType());
} else {
Align = 1;
}
@@ -1229,8 +1229,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
- uint64_t Size =
- DFSF.DFS.DL->getTypeStoreSize(SI.getValueOperand()->getType());
+ auto &DL = SI.getModule()->getDataLayout();
+ uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
if (Size == 0)
return;
@@ -1238,7 +1238,7 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
if (ClPreserveAlignment) {
Align = SI.getAlignment();
if (Align == 0)
- Align = DFSF.DFS.DL->getABITypeAlignment(SI.getValueOperand()->getType());
+ Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
} else {
Align = 1;
}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 7ec6695a35f..c2aa1e2f772 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -274,7 +274,6 @@ class MemorySanitizer : public FunctionPass {
MemorySanitizer(int TrackOrigins = 0)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
- DL(nullptr),
WarningFn(nullptr) {}
const char *getPassName() const override { return "MemorySanitizer"; }
bool runOnFunction(Function &F) override;
@@ -287,7 +286,6 @@ class MemorySanitizer : public FunctionPass {
/// \brief Track origins (allocation points) of uninitialized values.
int TrackOrigins;
- const DataLayout *DL;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@@ -449,7 +447,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- DL = &M.getDataLayout();
+ auto &DL = M.getDataLayout();
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
@@ -601,7 +599,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
- unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
if (IntptrSize == kOriginSize) return Origin;
assert(IntptrSize == kOriginSize * 2);
Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
@@ -611,8 +610,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Fill memory range with the given origin value.
void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
unsigned Size, unsigned Alignment) {
- unsigned IntptrAlignment = MS.DL->getABITypeAlignment(MS.IntptrTy);
- unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
assert(IntptrAlignment >= kMinOriginAlignment);
assert(IntptrSize >= kOriginSize);
@@ -640,8 +640,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
unsigned Alignment, bool AsCall) {
+ const DataLayout &DL = F.getParent()->getDataLayout();
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
- unsigned StoreSize = MS.DL->getTypeStoreSize(Shadow->getType());
+ unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
if (isa<StructType>(Shadow->getType())) {
paintOrigin(IRB, updateOrigin(Origin, IRB),
getOriginPtr(Addr, IRB, Alignment), StoreSize,
@@ -658,7 +659,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
unsigned TypeSizeInBits =
- MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+ DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
@@ -728,8 +729,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return;
}
- unsigned TypeSizeInBits =
- MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
+ const DataLayout &DL = OrigIns->getModule()->getDataLayout();
+
+ unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeWarningFn[SizeIndex];
@@ -769,7 +771,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
- if (!MS.DL) return false;
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -825,8 +826,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// This may return weird-sized types like i1.
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
- uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
+ uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
@@ -842,7 +844,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
- uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
+ uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
@@ -1035,14 +1037,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
+ const DataLayout &DL = F->getParent()->getDataLayout();
for (auto &FArg : F->args()) {
if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
- unsigned Size = FArg.hasByValAttr()
- ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
- : MS.DL->getTypeAllocSize(FArg.getType());
+ unsigned Size =
+ FArg.hasByValAttr()
+ ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
+ : DL.getTypeAllocSize(FArg.getType());
if (A == &FArg) {
bool Overflow = ArgOffset + Size > kParamTLSSize;
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
@@ -1053,7 +1057,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
- ArgAlign = MS.DL->getABITypeAlignment(EltType);
+ ArgAlign = DL.getABITypeAlignment(EltType);
}
if (Overflow) {
// ParamTLS overflow.
@@ -2424,10 +2428,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
" Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
+ const DataLayout &DL = F.getParent()->getDataLayout();
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
- Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
+ Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
if (ArgOffset + Size > kParamTLSSize) break;
unsigned ParamAlignment = CS.getParamAlignment(i + 1);
unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
@@ -2435,7 +2440,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
- Size = MS.DL->getTypeAllocSize(A->getType());
+ Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
@@ -2528,7 +2533,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
IRBuilder<> IRB(I.getNextNode());
- uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall2(MS.MsanPoisonStackFn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
@@ -2720,6 +2726,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
@@ -2729,7 +2736,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
// ByVal arguments always go to the overflow area.
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
- uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
+ uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
@@ -2751,7 +2758,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
FpOffset += 16;
break;
case AK_Memory:
- uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
}
@@ -2859,11 +2866,12 @@ struct VarArgMIPS64Helper : public VarArgHelper {
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
Value *Base;
- uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
#if defined(__MIPSEB__) || defined(MIPSEB)
// Adjusting the shadow for argument with size < 8 to match the placement
// of bits in big endian system
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 6b50ce9513b..d7cb47890e8 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -76,7 +76,7 @@ namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
- ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
+ ThreadSanitizer() : FunctionPass(ID) {}
const char *getPassName() const override;
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
@@ -84,15 +84,15 @@ struct ThreadSanitizer : public FunctionPass {
private:
void initializeCallbacks(Module &M);
- bool instrumentLoadOrStore(Instruction *I);
- bool instrumentAtomic(Instruction *I);
+ bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
+ bool instrumentAtomic(Instruction *I, const DataLayout &DL);
bool instrumentMemIntrinsic(Instruction *I);
- void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
- SmallVectorImpl<Instruction*> &All);
+ void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
+ SmallVectorImpl<Instruction *> &All,
+ const DataLayout &DL);
bool addrPointsToConstantData(Value *Addr);
- int getMemoryAccessFuncIndex(Value *Addr);
+ int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
- const DataLayout *DL;
Type *IntptrTy;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
@@ -230,7 +230,7 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
- DL = &M.getDataLayout();
+ const DataLayout &DL = M.getDataLayout();
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
@@ -282,8 +282,8 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
- SmallVectorImpl<Instruction*> &Local,
- SmallVectorImpl<Instruction*> &All) {
+ SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
+ const DataLayout &DL) {
SmallSet<Value*, 8> WriteTargets;
// Iterate from the end.
for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
@@ -307,7 +307,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
Value *Addr = isa<StoreInst>(*I)
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
+ if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
!PointerMayBeCaptured(Addr, true, true)) {
// The variable is addressable but not captured, so it cannot be
// referenced from a different thread and participate in a data race
@@ -335,7 +335,6 @@ static bool isAtomic(Instruction *I) {
}
bool ThreadSanitizer::runOnFunction(Function &F) {
- if (!DL) return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
@@ -345,6 +344,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
bool Res = false;
bool HasCalls = false;
bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
+ const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse all instructions, collect loads/stores/returns, check for calls.
for (auto &BB : F) {
@@ -359,10 +359,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
if (isa<MemIntrinsic>(Inst))
MemIntrinCalls.push_back(&Inst);
HasCalls = true;
- chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
+ DL);
}
}
- chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
}
// We have collected all loads and stores.
@@ -372,14 +373,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// Instrument memory accesses only if we want to report bugs in the function.
if (ClInstrumentMemoryAccesses && SanitizeFunction)
for (auto Inst : AllLoadsAndStores) {
- Res |= instrumentLoadOrStore(Inst);
+ Res |= instrumentLoadOrStore(Inst, DL);
}
// Instrument atomic memory accesses in any case (they can be used to
// implement synchronization).
if (ClInstrumentAtomics)
for (auto Inst : AtomicAccesses) {
- Res |= instrumentAtomic(Inst);
+ Res |= instrumentAtomic(Inst, DL);
}
if (ClInstrumentMemIntrinsics && SanitizeFunction)
@@ -403,13 +404,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
return Res;
}
-bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
+bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
+ const DataLayout &DL) {
IRBuilder<> IRB(I);
bool IsWrite = isa<StoreInst>(*I);
Value *Addr = IsWrite
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
if (IsWrite && isVtableAccess(I)) {
@@ -440,7 +442,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
? cast<StoreInst>(I)->getAlignment()
: cast<LoadInst>(I)->getAlignment();
Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
- const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
+ const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
Value *OnAccessFunc = nullptr;
if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
@@ -501,11 +503,11 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
-bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
+bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
IRBuilder<> IRB(I);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Value *Addr = LI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -519,7 +521,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -533,7 +535,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
Value *Addr = RMWI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
@@ -550,7 +552,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
Value *Addr = CASI->getPointerOperand();
- int Idx = getMemoryAccessFuncIndex(Addr);
+ int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@@ -580,11 +582,12 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
return true;
}
-int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
+int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
+ const DataLayout &DL) {
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
if (TypeSize != 8 && TypeSize != 16 &&
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
NumAccessesWithBadSize++;