Diffstat (limited to 'llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp')
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 566
1 file changed, 311 insertions(+), 255 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 3b33ced2640..0e6f6bacf44 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1,4 +1,4 @@
-//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
+//===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,6 +6,7 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
@@ -88,32 +89,64 @@
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
-
+//
//===----------------------------------------------------------------------===//
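As a concrete illustration of the CAS/RMW policy described above, here is a minimal sketch of the store half, assuming the shadow address for the accessed location has already been computed and that the file's LLVM headers are in scope; the function name is illustrative, not the pass's actual handler:

    // Sketch: for an atomic RMW/CAS, model the store half by writing a clean
    // (all-zero) shadow; the load half is modeled as returning initialized data.
    static void instrumentAtomicStoreSketch(IRBuilder<> &IRB, Value *ShadowPtr,
                                            Type *ShadowTy) {
      StoreInst *S = IRB.CreateStore(Constant::getNullValue(ShadowTy), ShadowPtr);
      S->setAtomic(AtomicOrdering::Release); // keep the shadow store atomic
    }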
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
using namespace llvm;
@@ -137,18 +170,23 @@ static const size_t kNumberOfAccessSizes = 4;
static cl::opt<int> ClTrackOrigins("msan-track-origins",
cl::desc("Track origins (allocation sites) of poisoned memory"),
cl::Hidden, cl::init(0));
+
static cl::opt<bool> ClKeepGoing("msan-keep-going",
cl::desc("keep going after reporting a UMR"),
cl::Hidden, cl::init(false));
+
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
cl::desc("poison uninitialized stack variables"),
cl::Hidden, cl::init(true));
+
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
cl::desc("poison uninitialized stack variables with a call"),
cl::Hidden, cl::init(false));
+
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
cl::desc("poison uninitialized stack variables with the given pattern"),
cl::Hidden, cl::init(0xff));
+
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
cl::desc("poison undef temps"),
cl::Hidden, cl::init(true));
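For context, these options tune how the pass reports the basic defect it exists to find. A minimal example of such a defect (hedged: built with clang -fsanitize=memory; the -msan-* names above are internal cl::opts, normally reached via -mllvm or the corresponding clang driver flags):

    #include <cstdio>

    int main() {
      int x;            // never initialized
      if (x)            // MemorySanitizer reports a use of uninitialized value here
        std::printf("x is nonzero\n");
      return 0;
    }

With -msan-keep-going (or a pass constructed with Recover = true), execution continues after the report instead of stopping at the first error.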
@@ -217,6 +255,8 @@ struct PlatformMemoryMapParams {
const MemoryMapParams *bits64;
};
+} // end anonymous namespace
+
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
0x000080000000, // AndMask
@@ -305,27 +345,39 @@ static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
&FreeBSD_X86_64_MemoryMapParams,
};
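The AndMask/XorMask pairs in these tables encode MSan's application-to-shadow address mapping. A sketch of that mapping in plain C++ (assumed from the scheme the tables encode; the pass itself emits the equivalent IR when computing shadow addresses):

    #include <cstdint>

    // Sketch: translate an application address into its shadow address.
    static uint64_t appToShadow(uint64_t Addr, uint64_t AndMask, uint64_t XorMask) {
      if (AndMask) Addr &= ~AndMask; // clear the bits that tag application memory
      if (XorMask) Addr ^= XorMask;  // offset into the shadow region
      return Addr;
    }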
+namespace {
+
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
- public:
+public:
+ // Pass identification, replacement for typeid.
+ static char ID;
+
MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
- Recover(Recover || ClKeepGoing),
- WarningFn(nullptr) {}
+ Recover(Recover || ClKeepGoing) {}
+
StringRef getPassName() const override { return "MemorySanitizer"; }
+
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
+
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
- static char ID; // Pass identification, replacement for typeid.
- private:
+private:
+ friend struct MemorySanitizerVisitor;
+ friend struct VarArgAMD64Helper;
+ friend struct VarArgMIPS64Helper;
+ friend struct VarArgAArch64Helper;
+ friend struct VarArgPowerPC64Helper;
+
void initializeCallbacks(Module &M);
/// \brief Track origins (allocation points) of uninitialized values.
@@ -335,26 +387,34 @@ class MemorySanitizer : public FunctionPass {
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
+
/// \brief Thread-local shadow storage for function parameters.
GlobalVariable *ParamTLS;
+
/// \brief Thread-local origin storage for function parameters.
GlobalVariable *ParamOriginTLS;
+
/// \brief Thread-local shadow storage for function return value.
GlobalVariable *RetvalTLS;
+
/// \brief Thread-local origin storage for function return value.
GlobalVariable *RetvalOriginTLS;
+
/// \brief Thread-local shadow storage for in-register va_arg function
/// parameters (x86_64-specific).
GlobalVariable *VAArgTLS;
+
/// \brief Thread-local shadow storage for va_arg overflow area
/// (x86_64-specific).
GlobalVariable *VAArgOverflowSizeTLS;
+
/// \brief Thread-local space used to pass origin value to the UMR reporting
/// function.
GlobalVariable *OriginTLS;
/// \brief The run-time callback to print a warning.
- Value *WarningFn;
+ Value *WarningFn = nullptr;
+
// These arrays are indexed by log2(AccessSize).
Value *MaybeWarningFn[kNumberOfAccessSizes];
Value *MaybeStoreOriginFn[kNumberOfAccessSizes];
@@ -362,11 +422,14 @@ class MemorySanitizer : public FunctionPass {
/// \brief Run-time helper that generates a new origin value for a stack
/// allocation.
Value *MsanSetAllocaOrigin4Fn;
+
/// \brief Run-time helper that poisons stack on function entry.
Value *MsanPoisonStackFn;
+
/// \brief Run-time helper that records a store (or any event) of an
/// uninitialized value and returns an updated origin id encoding this info.
Value *MsanChainOriginFn;
+
/// \brief MSan runtime replacements for memmove, memcpy and memset.
Value *MemmoveFn, *MemcpyFn, *MemsetFn;
@@ -374,21 +437,20 @@ class MemorySanitizer : public FunctionPass {
const MemoryMapParams *MapParams;
MDNode *ColdCallWeights;
+
/// \brief Branch weights for origin store.
MDNode *OriginStoreWeights;
+
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
- Function *MsanCtorFunction;
- friend struct MemorySanitizerVisitor;
- friend struct VarArgAMD64Helper;
- friend struct VarArgMIPS64Helper;
- friend struct VarArgAArch64Helper;
- friend struct VarArgPowerPC64Helper;
+ Function *MsanCtorFunction;
};
-} // anonymous namespace
+
+} // end anonymous namespace
char MemorySanitizer::ID = 0;
+
INITIALIZE_PASS_BEGIN(
MemorySanitizer, "msan",
"MemorySanitizer: detects uninitialized reads.", false, false)
@@ -586,6 +648,8 @@ namespace {
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
+ virtual ~VarArgHelper() = default;
+
/// \brief Visit a CallSite.
virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
@@ -600,21 +664,22 @@ struct VarArgHelper {
/// This method is called after visiting all interesting (see above)
/// instructions in a function.
virtual void finalizeInstrumentation() = 0;
-
- virtual ~VarArgHelper() {}
};
struct MemorySanitizerVisitor;
-VarArgHelper*
-CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
- MemorySanitizerVisitor &Visitor);
+} // end anonymous namespace
+
+static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor);
-unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
+static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
if (TypeSize <= 8) return 0;
return Log2_32_Ceil((TypeSize + 7) / 8);
}
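Worked values for the four access sizes covered by kNumberOfAccessSizes, following directly from the formula:

    // TypeSize (bits) -> index = log2(access size in bytes)
    //    8 -> 0   ((8+7)/8  = 1, ceil(log2 1) = 0)
    //   16 -> 1   ((16+7)/8 = 2, ceil(log2 2) = 1)
    //   32 -> 2   ((32+7)/8 = 4, ceil(log2 4) = 2)
    //   64 -> 3   ((64+7)/8 = 8, ceil(log2 8) = 3)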
+namespace {
+
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
@@ -641,8 +706,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *Shadow;
Value *Origin;
Instruction *OrigIns;
+
ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
- : Shadow(S), Origin(O), OrigIns(I) { }
+ : Shadow(S), Origin(O), OrigIns(I) {}
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
SmallVector<StoreInst *, 16> StoreList;
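The list above supports a deferred-check pattern: checks are recorded while visiting and materialized afterwards, so visiting never has to split basic blocks mid-walk. A hedged sketch of the recording side (illustrative; the visitor's real helpers do the equivalent):

    // Sketch: record a pending shadow check instead of branching immediately.
    void deferCheckSketch(Value *Shadow, Value *Origin, Instruction *OrigIns) {
      InstrumentationList.push_back(
          ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
    }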
@@ -856,7 +922,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
visit(*BB);
-
// Finalize PHI nodes.
for (PHINode *PN : ShadowPHINodes) {
PHINode *PNS = cast<PHINode>(getShadow(PN));
@@ -1489,14 +1554,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// arguments are initialized.
template <bool CombineShadow>
class Combiner {
- Value *Shadow;
- Value *Origin;
+ Value *Shadow = nullptr;
+ Value *Origin = nullptr;
IRBuilder<> &IRB;
MemorySanitizerVisitor *MSV;
public:
- Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
- Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
+ Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
+ : IRB(IRB), MSV(MSV) {}
/// \brief Add a pair of shadow and origin values to the mix.
Combiner &Add(Value *OpShadow, Value *OpOrigin) {
@@ -1550,8 +1615,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
};
- typedef Combiner<true> ShadowAndOriginCombiner;
- typedef Combiner<false> OriginCombiner;
+ using ShadowAndOriginCombiner = Combiner<true>;
+ using OriginCombiner = Combiner<false>;
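Typical use of these combiners, as a hedged sketch written as if it were another member of this visitor (it assumes a one-argument Add(Value *) convenience overload and the Done(Instruction *) finalizer on the class):

    // Sketch: OR together the shadows of all operands and keep the origin of
    // the first poisoned one, then attach the result to the instruction.
    void handleNaryOpSketch(Instruction &I, IRBuilder<> &IRB) {
      ShadowAndOriginCombiner SC(this, IRB);
      for (Use &Op : I.operands())
        SC.Add(Op.get());
      SC.Done(&I);
    }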
/// \brief Propagate origin for arbitrary operation.
void setOriginForNaryOp(Instruction &I) {
@@ -2204,28 +2269,28 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// intrinsic.
Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
switch (id) {
- case llvm::Intrinsic::x86_sse2_packsswb_128:
- case llvm::Intrinsic::x86_sse2_packuswb_128:
- return llvm::Intrinsic::x86_sse2_packsswb_128;
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_sse2_packuswb_128:
+ return Intrinsic::x86_sse2_packsswb_128;
- case llvm::Intrinsic::x86_sse2_packssdw_128:
- case llvm::Intrinsic::x86_sse41_packusdw:
- return llvm::Intrinsic::x86_sse2_packssdw_128;
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse41_packusdw:
+ return Intrinsic::x86_sse2_packssdw_128;
- case llvm::Intrinsic::x86_avx2_packsswb:
- case llvm::Intrinsic::x86_avx2_packuswb:
- return llvm::Intrinsic::x86_avx2_packsswb;
+ case Intrinsic::x86_avx2_packsswb:
+ case Intrinsic::x86_avx2_packuswb:
+ return Intrinsic::x86_avx2_packsswb;
- case llvm::Intrinsic::x86_avx2_packssdw:
- case llvm::Intrinsic::x86_avx2_packusdw:
- return llvm::Intrinsic::x86_avx2_packssdw;
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packusdw:
+ return Intrinsic::x86_avx2_packssdw;
- case llvm::Intrinsic::x86_mmx_packsswb:
- case llvm::Intrinsic::x86_mmx_packuswb:
- return llvm::Intrinsic::x86_mmx_packsswb;
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packuswb:
+ return Intrinsic::x86_mmx_packsswb;
- case llvm::Intrinsic::x86_mmx_packssdw:
- return llvm::Intrinsic::x86_mmx_packssdw;
+ case Intrinsic::x86_mmx_packssdw:
+ return Intrinsic::x86_mmx_packssdw;
default:
llvm_unreachable("unexpected intrinsic id");
}
@@ -2255,9 +2320,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
S2 = IRB.CreateBitCast(S2, T);
}
Value *S1_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
+ IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
Value *S2_ext = IRB.CreateSExt(
- IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
+ IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
if (isX86_MMX) {
Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
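The sext(icmp ne ..., 0) idiom above exists because a saturating pack would otherwise squash shadow bits. A worked example with a 16-bit element packed to 8 bits:

    //   shadow 0x0000 -> icmp ne = 0 -> sext = 0x0000 -> packsswb -> 0x00 (clean)
    //   shadow 0x0100 -> icmp ne = 1 -> sext = 0xFFFF -> packsswb -> 0xFF (poisoned)
    // Collapsing each element to all-zeros/all-ones first guarantees the packed
    // shadow is fully poisoned iff any source shadow bit was set.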
@@ -2366,213 +2431,213 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
- case llvm::Intrinsic::bswap:
+ case Intrinsic::bswap:
handleBswap(I);
break;
- case llvm::Intrinsic::x86_sse_stmxcsr:
+ case Intrinsic::x86_sse_stmxcsr:
handleStmxcsr(I);
break;
- case llvm::Intrinsic::x86_sse_ldmxcsr:
+ case Intrinsic::x86_sse_ldmxcsr:
handleLdmxcsr(I);
break;
- case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
- case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
- case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
- case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
- case llvm::Intrinsic::x86_avx512_cvttss2usi64:
- case llvm::Intrinsic::x86_avx512_cvttss2usi:
- case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
- case llvm::Intrinsic::x86_avx512_cvttsd2usi:
- case llvm::Intrinsic::x86_avx512_cvtusi2sd:
- case llvm::Intrinsic::x86_avx512_cvtusi2ss:
- case llvm::Intrinsic::x86_avx512_cvtusi642sd:
- case llvm::Intrinsic::x86_avx512_cvtusi642ss:
- case llvm::Intrinsic::x86_sse2_cvtsd2si64:
- case llvm::Intrinsic::x86_sse2_cvtsd2si:
- case llvm::Intrinsic::x86_sse2_cvtsd2ss:
- case llvm::Intrinsic::x86_sse2_cvtsi2sd:
- case llvm::Intrinsic::x86_sse2_cvtsi642sd:
- case llvm::Intrinsic::x86_sse2_cvtss2sd:
- case llvm::Intrinsic::x86_sse2_cvttsd2si64:
- case llvm::Intrinsic::x86_sse2_cvttsd2si:
- case llvm::Intrinsic::x86_sse_cvtsi2ss:
- case llvm::Intrinsic::x86_sse_cvtsi642ss:
- case llvm::Intrinsic::x86_sse_cvtss2si64:
- case llvm::Intrinsic::x86_sse_cvtss2si:
- case llvm::Intrinsic::x86_sse_cvttss2si64:
- case llvm::Intrinsic::x86_sse_cvttss2si:
+ case Intrinsic::x86_avx512_vcvtsd2usi64:
+ case Intrinsic::x86_avx512_vcvtsd2usi32:
+ case Intrinsic::x86_avx512_vcvtss2usi64:
+ case Intrinsic::x86_avx512_vcvtss2usi32:
+ case Intrinsic::x86_avx512_cvttss2usi64:
+ case Intrinsic::x86_avx512_cvttss2usi:
+ case Intrinsic::x86_avx512_cvttsd2usi64:
+ case Intrinsic::x86_avx512_cvttsd2usi:
+ case Intrinsic::x86_avx512_cvtusi2sd:
+ case Intrinsic::x86_avx512_cvtusi2ss:
+ case Intrinsic::x86_avx512_cvtusi642sd:
+ case Intrinsic::x86_avx512_cvtusi642ss:
+ case Intrinsic::x86_sse2_cvtsd2si64:
+ case Intrinsic::x86_sse2_cvtsd2si:
+ case Intrinsic::x86_sse2_cvtsd2ss:
+ case Intrinsic::x86_sse2_cvtsi2sd:
+ case Intrinsic::x86_sse2_cvtsi642sd:
+ case Intrinsic::x86_sse2_cvtss2sd:
+ case Intrinsic::x86_sse2_cvttsd2si64:
+ case Intrinsic::x86_sse2_cvttsd2si:
+ case Intrinsic::x86_sse_cvtsi2ss:
+ case Intrinsic::x86_sse_cvtsi642ss:
+ case Intrinsic::x86_sse_cvtss2si64:
+ case Intrinsic::x86_sse_cvtss2si:
+ case Intrinsic::x86_sse_cvttss2si64:
+ case Intrinsic::x86_sse_cvttss2si:
handleVectorConvertIntrinsic(I, 1);
break;
- case llvm::Intrinsic::x86_sse_cvtps2pi:
- case llvm::Intrinsic::x86_sse_cvttps2pi:
+ case Intrinsic::x86_sse_cvtps2pi:
+ case Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
break;
- case llvm::Intrinsic::x86_avx512_psll_w_512:
- case llvm::Intrinsic::x86_avx512_psll_d_512:
- case llvm::Intrinsic::x86_avx512_psll_q_512:
- case llvm::Intrinsic::x86_avx512_pslli_w_512:
- case llvm::Intrinsic::x86_avx512_pslli_d_512:
- case llvm::Intrinsic::x86_avx512_pslli_q_512:
- case llvm::Intrinsic::x86_avx512_psrl_w_512:
- case llvm::Intrinsic::x86_avx512_psrl_d_512:
- case llvm::Intrinsic::x86_avx512_psrl_q_512:
- case llvm::Intrinsic::x86_avx512_psra_w_512:
- case llvm::Intrinsic::x86_avx512_psra_d_512:
- case llvm::Intrinsic::x86_avx512_psra_q_512:
- case llvm::Intrinsic::x86_avx512_psrli_w_512:
- case llvm::Intrinsic::x86_avx512_psrli_d_512:
- case llvm::Intrinsic::x86_avx512_psrli_q_512:
- case llvm::Intrinsic::x86_avx512_psrai_w_512:
- case llvm::Intrinsic::x86_avx512_psrai_d_512:
- case llvm::Intrinsic::x86_avx512_psrai_q_512:
- case llvm::Intrinsic::x86_avx512_psra_q_256:
- case llvm::Intrinsic::x86_avx512_psra_q_128:
- case llvm::Intrinsic::x86_avx512_psrai_q_256:
- case llvm::Intrinsic::x86_avx512_psrai_q_128:
- case llvm::Intrinsic::x86_avx2_psll_w:
- case llvm::Intrinsic::x86_avx2_psll_d:
- case llvm::Intrinsic::x86_avx2_psll_q:
- case llvm::Intrinsic::x86_avx2_pslli_w:
- case llvm::Intrinsic::x86_avx2_pslli_d:
- case llvm::Intrinsic::x86_avx2_pslli_q:
- case llvm::Intrinsic::x86_avx2_psrl_w:
- case llvm::Intrinsic::x86_avx2_psrl_d:
- case llvm::Intrinsic::x86_avx2_psrl_q:
- case llvm::Intrinsic::x86_avx2_psra_w:
- case llvm::Intrinsic::x86_avx2_psra_d:
- case llvm::Intrinsic::x86_avx2_psrli_w:
- case llvm::Intrinsic::x86_avx2_psrli_d:
- case llvm::Intrinsic::x86_avx2_psrli_q:
- case llvm::Intrinsic::x86_avx2_psrai_w:
- case llvm::Intrinsic::x86_avx2_psrai_d:
- case llvm::Intrinsic::x86_sse2_psll_w:
- case llvm::Intrinsic::x86_sse2_psll_d:
- case llvm::Intrinsic::x86_sse2_psll_q:
- case llvm::Intrinsic::x86_sse2_pslli_w:
- case llvm::Intrinsic::x86_sse2_pslli_d:
- case llvm::Intrinsic::x86_sse2_pslli_q:
- case llvm::Intrinsic::x86_sse2_psrl_w:
- case llvm::Intrinsic::x86_sse2_psrl_d:
- case llvm::Intrinsic::x86_sse2_psrl_q:
- case llvm::Intrinsic::x86_sse2_psra_w:
- case llvm::Intrinsic::x86_sse2_psra_d:
- case llvm::Intrinsic::x86_sse2_psrli_w:
- case llvm::Intrinsic::x86_sse2_psrli_d:
- case llvm::Intrinsic::x86_sse2_psrli_q:
- case llvm::Intrinsic::x86_sse2_psrai_w:
- case llvm::Intrinsic::x86_sse2_psrai_d:
- case llvm::Intrinsic::x86_mmx_psll_w:
- case llvm::Intrinsic::x86_mmx_psll_d:
- case llvm::Intrinsic::x86_mmx_psll_q:
- case llvm::Intrinsic::x86_mmx_pslli_w:
- case llvm::Intrinsic::x86_mmx_pslli_d:
- case llvm::Intrinsic::x86_mmx_pslli_q:
- case llvm::Intrinsic::x86_mmx_psrl_w:
- case llvm::Intrinsic::x86_mmx_psrl_d:
- case llvm::Intrinsic::x86_mmx_psrl_q:
- case llvm::Intrinsic::x86_mmx_psra_w:
- case llvm::Intrinsic::x86_mmx_psra_d:
- case llvm::Intrinsic::x86_mmx_psrli_w:
- case llvm::Intrinsic::x86_mmx_psrli_d:
- case llvm::Intrinsic::x86_mmx_psrli_q:
- case llvm::Intrinsic::x86_mmx_psrai_w:
- case llvm::Intrinsic::x86_mmx_psrai_d:
+ case Intrinsic::x86_avx512_psll_w_512:
+ case Intrinsic::x86_avx512_psll_d_512:
+ case Intrinsic::x86_avx512_psll_q_512:
+ case Intrinsic::x86_avx512_pslli_w_512:
+ case Intrinsic::x86_avx512_pslli_d_512:
+ case Intrinsic::x86_avx512_pslli_q_512:
+ case Intrinsic::x86_avx512_psrl_w_512:
+ case Intrinsic::x86_avx512_psrl_d_512:
+ case Intrinsic::x86_avx512_psrl_q_512:
+ case Intrinsic::x86_avx512_psra_w_512:
+ case Intrinsic::x86_avx512_psra_d_512:
+ case Intrinsic::x86_avx512_psra_q_512:
+ case Intrinsic::x86_avx512_psrli_w_512:
+ case Intrinsic::x86_avx512_psrli_d_512:
+ case Intrinsic::x86_avx512_psrli_q_512:
+ case Intrinsic::x86_avx512_psrai_w_512:
+ case Intrinsic::x86_avx512_psrai_d_512:
+ case Intrinsic::x86_avx512_psrai_q_512:
+ case Intrinsic::x86_avx512_psra_q_256:
+ case Intrinsic::x86_avx512_psra_q_128:
+ case Intrinsic::x86_avx512_psrai_q_256:
+ case Intrinsic::x86_avx512_psrai_q_128:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psra_w:
+ case Intrinsic::x86_avx2_psra_d:
+ case Intrinsic::x86_avx2_psrli_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_sse2_pslli_w:
+ case Intrinsic::x86_sse2_pslli_d:
+ case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psra_w:
+ case Intrinsic::x86_sse2_psra_d:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrai_w:
+ case Intrinsic::x86_sse2_psrai_d:
+ case Intrinsic::x86_mmx_psll_w:
+ case Intrinsic::x86_mmx_psll_d:
+ case Intrinsic::x86_mmx_psll_q:
+ case Intrinsic::x86_mmx_pslli_w:
+ case Intrinsic::x86_mmx_pslli_d:
+ case Intrinsic::x86_mmx_pslli_q:
+ case Intrinsic::x86_mmx_psrl_w:
+ case Intrinsic::x86_mmx_psrl_d:
+ case Intrinsic::x86_mmx_psrl_q:
+ case Intrinsic::x86_mmx_psra_w:
+ case Intrinsic::x86_mmx_psra_d:
+ case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psrli_d:
+ case Intrinsic::x86_mmx_psrli_q:
+ case Intrinsic::x86_mmx_psrai_w:
+ case Intrinsic::x86_mmx_psrai_d:
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
- case llvm::Intrinsic::x86_avx2_psllv_d:
- case llvm::Intrinsic::x86_avx2_psllv_d_256:
- case llvm::Intrinsic::x86_avx512_psllv_d_512:
- case llvm::Intrinsic::x86_avx2_psllv_q:
- case llvm::Intrinsic::x86_avx2_psllv_q_256:
- case llvm::Intrinsic::x86_avx512_psllv_q_512:
- case llvm::Intrinsic::x86_avx2_psrlv_d:
- case llvm::Intrinsic::x86_avx2_psrlv_d_256:
- case llvm::Intrinsic::x86_avx512_psrlv_d_512:
- case llvm::Intrinsic::x86_avx2_psrlv_q:
- case llvm::Intrinsic::x86_avx2_psrlv_q_256:
- case llvm::Intrinsic::x86_avx512_psrlv_q_512:
- case llvm::Intrinsic::x86_avx2_psrav_d:
- case llvm::Intrinsic::x86_avx2_psrav_d_256:
- case llvm::Intrinsic::x86_avx512_psrav_d_512:
- case llvm::Intrinsic::x86_avx512_psrav_q_128:
- case llvm::Intrinsic::x86_avx512_psrav_q_256:
- case llvm::Intrinsic::x86_avx512_psrav_q_512:
+ case Intrinsic::x86_avx2_psllv_d:
+ case Intrinsic::x86_avx2_psllv_d_256:
+ case Intrinsic::x86_avx512_psllv_d_512:
+ case Intrinsic::x86_avx2_psllv_q:
+ case Intrinsic::x86_avx2_psllv_q_256:
+ case Intrinsic::x86_avx512_psllv_q_512:
+ case Intrinsic::x86_avx2_psrlv_d:
+ case Intrinsic::x86_avx2_psrlv_d_256:
+ case Intrinsic::x86_avx512_psrlv_d_512:
+ case Intrinsic::x86_avx2_psrlv_q:
+ case Intrinsic::x86_avx2_psrlv_q_256:
+ case Intrinsic::x86_avx512_psrlv_q_512:
+ case Intrinsic::x86_avx2_psrav_d:
+ case Intrinsic::x86_avx2_psrav_d_256:
+ case Intrinsic::x86_avx512_psrav_d_512:
+ case Intrinsic::x86_avx512_psrav_q_128:
+ case Intrinsic::x86_avx512_psrav_q_256:
+ case Intrinsic::x86_avx512_psrav_q_512:
handleVectorShiftIntrinsic(I, /* Variable */ true);
break;
- case llvm::Intrinsic::x86_sse2_packsswb_128:
- case llvm::Intrinsic::x86_sse2_packssdw_128:
- case llvm::Intrinsic::x86_sse2_packuswb_128:
- case llvm::Intrinsic::x86_sse41_packusdw:
- case llvm::Intrinsic::x86_avx2_packsswb:
- case llvm::Intrinsic::x86_avx2_packssdw:
- case llvm::Intrinsic::x86_avx2_packuswb:
- case llvm::Intrinsic::x86_avx2_packusdw:
+ case Intrinsic::x86_sse2_packsswb_128:
+ case Intrinsic::x86_sse2_packssdw_128:
+ case Intrinsic::x86_sse2_packuswb_128:
+ case Intrinsic::x86_sse41_packusdw:
+ case Intrinsic::x86_avx2_packsswb:
+ case Intrinsic::x86_avx2_packssdw:
+ case Intrinsic::x86_avx2_packuswb:
+ case Intrinsic::x86_avx2_packusdw:
handleVectorPackIntrinsic(I);
break;
- case llvm::Intrinsic::x86_mmx_packsswb:
- case llvm::Intrinsic::x86_mmx_packuswb:
+ case Intrinsic::x86_mmx_packsswb:
+ case Intrinsic::x86_mmx_packuswb:
handleVectorPackIntrinsic(I, 16);
break;
- case llvm::Intrinsic::x86_mmx_packssdw:
+ case Intrinsic::x86_mmx_packssdw:
handleVectorPackIntrinsic(I, 32);
break;
- case llvm::Intrinsic::x86_mmx_psad_bw:
- case llvm::Intrinsic::x86_sse2_psad_bw:
- case llvm::Intrinsic::x86_avx2_psad_bw:
+ case Intrinsic::x86_mmx_psad_bw:
+ case Intrinsic::x86_sse2_psad_bw:
+ case Intrinsic::x86_avx2_psad_bw:
handleVectorSadIntrinsic(I);
break;
- case llvm::Intrinsic::x86_sse2_pmadd_wd:
- case llvm::Intrinsic::x86_avx2_pmadd_wd:
- case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
- case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
+ case Intrinsic::x86_sse2_pmadd_wd:
+ case Intrinsic::x86_avx2_pmadd_wd:
+ case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
+ case Intrinsic::x86_avx2_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I);
break;
- case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
+ case Intrinsic::x86_ssse3_pmadd_ub_sw:
handleVectorPmaddIntrinsic(I, 8);
break;
- case llvm::Intrinsic::x86_mmx_pmadd_wd:
+ case Intrinsic::x86_mmx_pmadd_wd:
handleVectorPmaddIntrinsic(I, 16);
break;
- case llvm::Intrinsic::x86_sse_cmp_ss:
- case llvm::Intrinsic::x86_sse2_cmp_sd:
- case llvm::Intrinsic::x86_sse_comieq_ss:
- case llvm::Intrinsic::x86_sse_comilt_ss:
- case llvm::Intrinsic::x86_sse_comile_ss:
- case llvm::Intrinsic::x86_sse_comigt_ss:
- case llvm::Intrinsic::x86_sse_comige_ss:
- case llvm::Intrinsic::x86_sse_comineq_ss:
- case llvm::Intrinsic::x86_sse_ucomieq_ss:
- case llvm::Intrinsic::x86_sse_ucomilt_ss:
- case llvm::Intrinsic::x86_sse_ucomile_ss:
- case llvm::Intrinsic::x86_sse_ucomigt_ss:
- case llvm::Intrinsic::x86_sse_ucomige_ss:
- case llvm::Intrinsic::x86_sse_ucomineq_ss:
- case llvm::Intrinsic::x86_sse2_comieq_sd:
- case llvm::Intrinsic::x86_sse2_comilt_sd:
- case llvm::Intrinsic::x86_sse2_comile_sd:
- case llvm::Intrinsic::x86_sse2_comigt_sd:
- case llvm::Intrinsic::x86_sse2_comige_sd:
- case llvm::Intrinsic::x86_sse2_comineq_sd:
- case llvm::Intrinsic::x86_sse2_ucomieq_sd:
- case llvm::Intrinsic::x86_sse2_ucomilt_sd:
- case llvm::Intrinsic::x86_sse2_ucomile_sd:
- case llvm::Intrinsic::x86_sse2_ucomigt_sd:
- case llvm::Intrinsic::x86_sse2_ucomige_sd:
- case llvm::Intrinsic::x86_sse2_ucomineq_sd:
+ case Intrinsic::x86_sse_cmp_ss:
+ case Intrinsic::x86_sse2_cmp_sd:
+ case Intrinsic::x86_sse_comieq_ss:
+ case Intrinsic::x86_sse_comilt_ss:
+ case Intrinsic::x86_sse_comile_ss:
+ case Intrinsic::x86_sse_comigt_ss:
+ case Intrinsic::x86_sse_comige_ss:
+ case Intrinsic::x86_sse_comineq_ss:
+ case Intrinsic::x86_sse_ucomieq_ss:
+ case Intrinsic::x86_sse_ucomilt_ss:
+ case Intrinsic::x86_sse_ucomile_ss:
+ case Intrinsic::x86_sse_ucomigt_ss:
+ case Intrinsic::x86_sse_ucomige_ss:
+ case Intrinsic::x86_sse_ucomineq_ss:
+ case Intrinsic::x86_sse2_comieq_sd:
+ case Intrinsic::x86_sse2_comilt_sd:
+ case Intrinsic::x86_sse2_comile_sd:
+ case Intrinsic::x86_sse2_comigt_sd:
+ case Intrinsic::x86_sse2_comige_sd:
+ case Intrinsic::x86_sse2_comineq_sd:
+ case Intrinsic::x86_sse2_ucomieq_sd:
+ case Intrinsic::x86_sse2_ucomilt_sd:
+ case Intrinsic::x86_sse2_ucomile_sd:
+ case Intrinsic::x86_sse2_ucomigt_sd:
+ case Intrinsic::x86_sse2_ucomige_sd:
+ case Intrinsic::x86_sse2_ucomineq_sd:
handleVectorCompareScalarIntrinsic(I);
break;
- case llvm::Intrinsic::x86_sse_cmp_ps:
- case llvm::Intrinsic::x86_sse2_cmp_pd:
+ case Intrinsic::x86_sse_cmp_ps:
+ case Intrinsic::x86_sse2_cmp_pd:
// FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
// generates reasonable-looking IR that fails in the backend with "Do not
// know how to split the result of this operator!".
@@ -2939,18 +3004,16 @@ struct VarArgAMD64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgOverflowSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
- VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgOverflowSize(nullptr) {}
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
+ VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+
ArgKind classifyArgument(Value* arg) {
// A very rough approximation of X86_64 argument classification rules.
Type *T = arg->getType();
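A hedged illustration of how this rough classification buckets a few common types (the authoritative rules are the x86-64 psABI's; this helper only approximates them):

    //   int, long, T*               -> AK_GeneralPurpose (integer registers)
    //   float, double, <4 x float>  -> AK_FloatingPoint  (SSE registers)
    //   structs and other types     -> AK_Memory         (passed in memory)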
@@ -3119,15 +3182,13 @@ struct VarArgMIPS64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgSize(nullptr) {}
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
@@ -3135,11 +3196,11 @@ struct VarArgMIPS64Helper : public VarArgHelper {
for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
CS.getFunctionType()->getNumParams(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
- llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ Triple TargetTriple(F.getParent()->getTargetTriple());
Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- if (TargetTriple.getArch() == llvm::Triple::mips64) {
+ if (TargetTriple.getArch() == Triple::mips64) {
// Adjust the shadow for arguments with size < 8 to match the placement
// of bits in a big-endian system
if (ArgSize < 8)
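The body of this guard is elided by the hunk; presumably it shifts the shadow placement by the unused part of the 8-byte slot. Worked example: on big-endian mips64 a 4-byte argument occupies the high-address half of its slot, so its shadow offset must be bumped by 8 - 4 = 4 bytes, while little-endian mips64el needs no adjustment.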
@@ -3217,7 +3278,6 @@ struct VarArgMIPS64Helper : public VarArgHelper {
}
};
-
/// \brief AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
static const unsigned kAArch64GrArgSize = 64;
@@ -3234,18 +3294,16 @@ struct VarArgAArch64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgOverflowSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgOverflowSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
- VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgOverflowSize(nullptr) {}
-
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
+ VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+
ArgKind classifyArgument(Value* arg) {
Type *T = arg->getType();
if (T->isFPOrFPVectorTy())
@@ -3468,15 +3526,13 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
Function &F;
MemorySanitizer &MS;
MemorySanitizerVisitor &MSV;
- Value *VAArgTLSCopy;
- Value *VAArgSize;
+ Value *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
SmallVector<CallInst*, 16> VAStartInstrumentationList;
VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
- MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
- VAArgSize(nullptr) {}
+ MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
// For PowerPC, we need to deal with alignment of stack arguments -
@@ -3486,12 +3542,12 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
// compute current offset from stack pointer (which is always properly
// aligned), and offset for the first vararg, then subtract them.
unsigned VAArgBase;
- llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ Triple TargetTriple(F.getParent()->getTargetTriple());
// Parameter save area starts at 48 bytes from frame pointer for ABIv1,
// and 32 bytes for ABIv2. This is usually determined by target
// endianness, but in theory could be overridden by function attribute.
// For simplicity, we ignore it here (it'd only matter for QPX vectors).
- if (TargetTriple.getArch() == llvm::Triple::ppc64)
+ if (TargetTriple.getArch() == Triple::ppc64)
VAArgBase = 48;
else
VAArgBase = 32;
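Worked example: on big-endian ppc64 (ABIv1) the first vararg's shadow is taken at VAArgBase = 48 bytes from the frame pointer, on ppc64le (ABIv2) at 32; each argument's slot in the shadow copy is then its stack offset minus that base, which lets both ABIs share the rest of the logic.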
@@ -3634,27 +3690,27 @@ struct VarArgNoOpHelper : public VarArgHelper {
void finalizeInstrumentation() override {}
};
-VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
- MemorySanitizerVisitor &Visitor) {
+} // end anonymous namespace
+
+static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
+ MemorySanitizerVisitor &Visitor) {
// VarArg handling is only implemented for AMD64, MIPS64, AArch64 and
// PowerPC64 (see the dispatch below). False positives are possible on
// other platforms.
- llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
- if (TargetTriple.getArch() == llvm::Triple::x86_64)
+ Triple TargetTriple(Func.getParent()->getTargetTriple());
+ if (TargetTriple.getArch() == Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
- TargetTriple.getArch() == llvm::Triple::mips64el)
+ else if (TargetTriple.getArch() == Triple::mips64 ||
+ TargetTriple.getArch() == Triple::mips64el)
return new VarArgMIPS64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::aarch64)
+ else if (TargetTriple.getArch() == Triple::aarch64)
return new VarArgAArch64Helper(Func, Msan, Visitor);
- else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
- TargetTriple.getArch() == llvm::Triple::ppc64le)
+ else if (TargetTriple.getArch() == Triple::ppc64 ||
+ TargetTriple.getArch() == Triple::ppc64le)
return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
-} // anonymous namespace
-
bool MemorySanitizer::runOnFunction(Function &F) {
if (&F == MsanCtorFunction)
return false;