5 files changed, 751 insertions, 79 deletions
diff --git a/llvm/include/llvm/Transforms/Instrumentation.h b/llvm/include/llvm/Transforms/Instrumentation.h
index 26d2bde8ed3..d6d9529ba9a 100644
--- a/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/llvm/include/llvm/Transforms/Instrumentation.h
@@ -134,7 +134,8 @@ ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
 // Insert MemorySanitizer instrumentation (detection of uninitialized reads)
 FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
-                                        bool Recover = false);
+                                        bool Recover = false,
+                                        bool EnableKmsan = false);
 
 FunctionPass *createHWAddressSanitizerPass(bool CompileKernel = false,
                                            bool Recover = false);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 9a9bbff715a..c0066eb1128 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -89,7 +89,38 @@
 /// implementation ignores the load aspect of CAS/RMW, always returning a clean
 /// value. It implements the store part as a simple atomic store by storing a
 /// clean shadow.
-//
+///
+/// KernelMemorySanitizer (KMSAN) implementation.
+///
+/// The major differences between KMSAN and MSan instrumentation are:
+///  - KMSAN always tracks the origins and implies msan-keep-going=true;
+///  - KMSAN allocates shadow and origin memory for each page separately, so
+///    there are no explicit accesses to shadow and origin in the
+///    instrumentation.
+///    Shadow and origin values for a particular X-byte memory location
+///    (X=1,2,4,8) are accessed through pointers obtained via the
+///      __msan_metadata_ptr_for_load_X(ptr)
+///      __msan_metadata_ptr_for_store_X(ptr)
+///    functions. The corresponding functions check that the X-byte accesses
+///    are possible and return the pointers to shadow and origin memory.
+///    Arbitrary sized accesses are handled with:
+///      __msan_metadata_ptr_for_load_n(ptr, size)
+///      __msan_metadata_ptr_for_store_n(ptr, size)
+///  - TLS variables are stored in a single per-task struct. A call to a
+///    function __msan_get_context_state() returning a pointer to that struct
+///    is inserted into every instrumented function before the entry block;
+///  - __msan_warning() takes a 32-bit origin parameter;
+///  - local variables are poisoned with __msan_poison_alloca() upon function
+///    entry and unpoisoned with __msan_unpoison_alloca() before leaving the
+///    function;
+///  - the pass doesn't declare any global variables or add global constructors
+///    to the translation unit.
+///
+/// Also, KMSAN currently ignores uninitialized memory passed into inline asm
+/// calls, making sure we're on the safe side wrt. possible false positives.
+///
+/// KernelMemorySanitizer only supports X86_64 at the moment.
+///
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/APInt.h"
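A rough C-level sketch of the runtime interface the comment above describes may help when reading the rest of the patch. The signatures are inferred from createKernelApi() further down in this commit; the struct and parameter names (shadow_origin_ptr, addr, descr) are illustrative assumptions, not taken from a kernel header:

    #include <cstdint>

    extern "C" {
    // A pair of pointers into the metadata backing one memory access.
    struct shadow_origin_ptr {
      void *shadow;
      std::uint32_t *origin;
    };

    // Fixed-size accessors (X = 1, 2, 4, 8): check that an X-byte access to
    // `addr` is possible and return pointers to its shadow and origin.
    struct shadow_origin_ptr __msan_metadata_ptr_for_load_1(void *addr);
    struct shadow_origin_ptr __msan_metadata_ptr_for_store_1(void *addr);
    // ..._2, ..._4 and ..._8 have the same shape; arbitrary sizes use:
    struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
                                                            std::uintptr_t size);
    struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
                                                             std::uintptr_t size);

    // Hands out the per-task struct that replaces MSan's TLS globals.
    struct kmsan_context_state;
    struct kmsan_context_state *__msan_get_context_state(void);

    // Reporting and local-variable poisoning; `descr` is the
    // "----<var>@<function>" description string built by the pass.
    void __msan_warning(std::uint32_t origin);
    void __msan_poison_alloca(void *addr, std::uintptr_t size, char *descr);
    void __msan_unpoison_alloca(void *addr, std::uintptr_t size);
    }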
@@ -233,6 +264,11 @@ static cl::opt<int> ClInstrumentationWithCallThreshold(
         "inline checks (-1 means never use callbacks)."),
     cl::Hidden, cl::init(3500));
 
+static cl::opt<bool>
+    ClEnableKmsan("msan-kernel",
+                  cl::desc("Enable KernelMemorySanitizer instrumentation"),
+                  cl::Hidden, cl::init(false));
+
 // This is an experiment to enable handling of cases where shadow is a non-zero
 // compile-time constant. For some unexplainable reason they were silently
 // ignored in the instrumentation.
@@ -400,11 +436,19 @@ public:
   // Pass identification, replacement for typeid.
   static char ID;
 
-  MemorySanitizer(int TrackOrigins = 0, bool Recover = false)
-      : FunctionPass(ID),
-        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
-        Recover(Recover || ClKeepGoing) {}
-
+  MemorySanitizer(int TrackOrigins = 0, bool Recover = false,
+                  bool EnableKmsan = false)
+      : FunctionPass(ID) {
+    this->CompileKernel =
+        ClEnableKmsan.getNumOccurrences() > 0 ? ClEnableKmsan : EnableKmsan;
+    if (ClTrackOrigins.getNumOccurrences() > 0)
+      this->TrackOrigins = ClTrackOrigins;
+    else
+      this->TrackOrigins = this->CompileKernel ? 2 : TrackOrigins;
+    this->Recover = ClKeepGoing.getNumOccurrences() > 0
+                        ? ClKeepGoing
+                        : (this->CompileKernel | Recover);
+  }
   StringRef getPassName() const override { return "MemorySanitizer"; }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -422,8 +466,12 @@ private:
   friend struct VarArgPowerPC64Helper;
 
   void initializeCallbacks(Module &M);
+  void createKernelApi(Module &M);
   void createUserspaceApi(Module &M);
 
+  /// True if we're compiling the Linux kernel.
+  bool CompileKernel;
+
   /// Track origins (allocation points) of uninitialized values.
   int TrackOrigins;
   bool Recover;
@@ -432,33 +480,39 @@ private:
   Type *IntptrTy;
   Type *OriginTy;
 
+  // XxxTLS variables represent the per-thread state in MSan and per-task state
+  // in KMSAN.
+  // For the userspace these point to thread-local globals. In the kernel land
+  // they point to the members of a per-task struct obtained via a call to
+  // __msan_get_context_state().
+
   /// Thread-local shadow storage for function parameters.
-  GlobalVariable *ParamTLS;
+  Value *ParamTLS;
 
   /// Thread-local origin storage for function parameters.
-  GlobalVariable *ParamOriginTLS;
+  Value *ParamOriginTLS;
 
   /// Thread-local shadow storage for function return value.
-  GlobalVariable *RetvalTLS;
+  Value *RetvalTLS;
 
   /// Thread-local origin storage for function return value.
-  GlobalVariable *RetvalOriginTLS;
+  Value *RetvalOriginTLS;
 
   /// Thread-local shadow storage for in-register va_arg function
   /// parameters (x86_64-specific).
-  GlobalVariable *VAArgTLS;
+  Value *VAArgTLS;
 
   /// Thread-local shadow storage for in-register va_arg function
   /// parameters (x86_64-specific).
-  GlobalVariable *VAArgOriginTLS;
+  Value *VAArgOriginTLS;
 
   /// Thread-local shadow storage for va_arg overflow area
   /// (x86_64-specific).
-  GlobalVariable *VAArgOverflowSizeTLS;
+  Value *VAArgOverflowSizeTLS;
 
   /// Thread-local space used to pass origin value to the UMR reporting
   /// function.
-  GlobalVariable *OriginTLS;
+  Value *OriginTLS;
 
   /// Are the instrumentation callbacks set up?
   bool CallbacksInitialized = false;
@@ -484,6 +538,21 @@ private:
   /// MSan runtime replacements for memmove, memcpy and memset.
   Value *MemmoveFn, *MemcpyFn, *MemsetFn;
 
+  /// KMSAN callback for task-local function argument shadow.
+  Value *MsanGetContextStateFn;
+
+  /// Functions for poisoning/unpoisoning local variables
+  Value *MsanPoisonAllocaFn, *MsanUnpoisonAllocaFn;
+
+  /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
+  /// pointers.
+  Value *MsanMetadataPtrForLoadN, *MsanMetadataPtrForStoreN;
+  Value *MsanMetadataPtrForLoad_1_8[4];
+  Value *MsanMetadataPtrForStore_1_8[4];
+
+  /// Helper to choose between different MsanMetadataPtrXxx().
+  Value *getKmsanShadowOriginAccessFn(bool isStore, int size);
+
   /// Memory map parameters used in application-to-shadow calculation.
   const MemoryMapParams *MapParams;
@@ -514,8 +583,9 @@ INITIALIZE_PASS_END(
     MemorySanitizer, "msan",
     "MemorySanitizer: detects uninitialized reads.", false, false)
 
-FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) {
-  return new MemorySanitizer(TrackOrigins, Recover);
+FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover,
+                                              bool CompileKernel) {
+  return new MemorySanitizer(TrackOrigins, Recover, CompileKernel);
 }
 
 /// Create a non-const global initialized with the given string.
@@ -530,6 +600,68 @@ static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                             GlobalValue::PrivateLinkage, StrConst, "");
 }
 
+/// Create KMSAN API callbacks.
+void MemorySanitizer::createKernelApi(Module &M) {
+  IRBuilder<> IRB(*C);
+
+  // These will be initialized in insertKmsanPrologue().
+  RetvalTLS = nullptr;
+  RetvalOriginTLS = nullptr;
+  ParamTLS = nullptr;
+  ParamOriginTLS = nullptr;
+  VAArgTLS = nullptr;
+  VAArgOriginTLS = nullptr;
+  VAArgOverflowSizeTLS = nullptr;
+  // OriginTLS is unused in the kernel.
+  OriginTLS = nullptr;
+
+  // __msan_warning() in the kernel takes an origin.
+  WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
+                                    IRB.getInt32Ty());
+  // Requests the per-task context state (kmsan_context_state*) from the
+  // runtime library.
+  MsanGetContextStateFn = M.getOrInsertFunction(
+      "__msan_get_context_state",
+      PointerType::get(
+          StructType::get(ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
+                          ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
+                          ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
+                          ArrayType::get(IRB.getInt64Ty(),
+                                         kParamTLSSize / 8), /* va_arg_origin */
+                          IRB.getInt64Ty(),
+                          ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
+                          OriginTy),
+          0));
+
+  Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
+                                PointerType::get(IRB.getInt32Ty(), 0));
+
+  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
+    std::string name_load =
+        "__msan_metadata_ptr_for_load_" + std::to_string(size);
+    std::string name_store =
+        "__msan_metadata_ptr_for_store_" + std::to_string(size);
+    MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
+        name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
+    MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
+        name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
+  }
+
+  MsanMetadataPtrForLoadN = M.getOrInsertFunction(
+      "__msan_metadata_ptr_for_load_n", RetTy,
+      PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
+  MsanMetadataPtrForStoreN = M.getOrInsertFunction(
+      "__msan_metadata_ptr_for_store_n", RetTy,
+      PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
+
+  // Functions for poisoning and unpoisoning memory.
+  MsanPoisonAllocaFn =
+      M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
+                            IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
+  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
+      "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
+}
+
 /// Insert declarations for userspace-specific functions and globals.
 void MemorySanitizer::createUserspaceApi(Module &M) {
   IRBuilder<> IRB(*C);
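A C++ mirror of the context-state struct built above makes the GEP indices in insertKmsanPrologue() below easier to follow. This is a sketch that assumes kParamTLSSize and kRetvalTLSSize are both 800 bytes, as elsewhere in MemorySanitizer.cpp; the field names are invented here, only the order and sizes come from the code above:

    #include <cstdint>

    // Field-for-field mirror of the StructType in createKernelApi(); the
    // index comments match the GEPs emitted by insertKmsanPrologue().
    struct kmsan_context_state {
      std::uint64_t param_tls[800 / 8];           // 0: parameter shadow
      std::uint64_t retval_tls[800 / 8];          // 1: return value shadow
      std::uint64_t va_arg_tls[800 / 8];          // 2: va_arg shadow
      std::uint64_t va_arg_origin_tls[800 / 8];   // 3: va_arg origins
      std::uint64_t va_arg_overflow_size;         // 4: overflow area size
      std::uint32_t param_origin_tls[800 / 4];    // 5: parameter origins
      std::uint32_t retval_origin_tls;            // 6: return value origin
      std::uint32_t origin_tls;                   // unused in the kernel
    };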
@@ -625,10 +757,31 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
                             StringRef(""), StringRef(""),
                             /*hasSideEffects=*/true);
 
-  createUserspaceApi(M);
+  if (CompileKernel) {
+    createKernelApi(M);
+  } else {
+    createUserspaceApi(M);
+  }
   CallbacksInitialized = true;
 }
 
+Value *MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore, int size) {
+  Value **Fns =
+      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
+  switch (size) {
+  case 1:
+    return Fns[0];
+  case 2:
+    return Fns[1];
+  case 4:
+    return Fns[2];
+  case 8:
+    return Fns[3];
+  default:
+    return nullptr;
+  }
+}
+
 /// Module-level initialization.
 ///
 /// inserts a call to __msan_init to the module's constructor list.
@@ -705,27 +858,28 @@ bool MemorySanitizer::doInitialization(Module &M) {
   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
 
-  std::tie(MsanCtorFunction, std::ignore) =
-      createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
-                                          /*InitArgTypes=*/{},
-                                          /*InitArgs=*/{});
-  if (ClWithComdat) {
-    Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
-    MsanCtorFunction->setComdat(MsanCtorComdat);
-    appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
-  } else {
-    appendToGlobalCtors(M, MsanCtorFunction, 0);
-  }
-
-
-  if (TrackOrigins)
-    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
-                       IRB.getInt32(TrackOrigins), "__msan_track_origins");
+  if (!CompileKernel) {
+    std::tie(MsanCtorFunction, std::ignore) =
+        createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName,
+                                            kMsanInitName,
+                                            /*InitArgTypes=*/{},
+                                            /*InitArgs=*/{});
+    if (ClWithComdat) {
+      Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
+      MsanCtorFunction->setComdat(MsanCtorComdat);
+      appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
+    } else {
+      appendToGlobalCtors(M, MsanCtorFunction, 0);
+    }
 
-  if (Recover)
-    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
-                       IRB.getInt32(Recover), "__msan_keep_going");
+    if (TrackOrigins)
+      new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+                         IRB.getInt32(TrackOrigins), "__msan_track_origins");
 
+    if (Recover)
+      new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
+                         IRB.getInt32(Recover), "__msan_keep_going");
+  }
   return true;
 }
 
@@ -819,7 +973,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
     MS.initializeCallbacks(*F.getParent());
 
-    ActualFnStart = &F.getEntryBlock();
+    if (MS.CompileKernel)
+      ActualFnStart = insertKmsanPrologue(F);
+    else
+      ActualFnStart = &F.getEntryBlock();
 
     LLVM_DEBUG(if (!InsertChecks) dbgs()
                << "MemorySanitizer is not inserting checks into '"
@@ -893,7 +1050,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
       unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
+      if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
         Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
         Value *ConvertedShadow2 = IRB.CreateZExt(
             ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -942,10 +1099,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
     if (!Origin)
       Origin = (Value *)IRB.getInt32(0);
-    if (MS.TrackOrigins) {
-      IRB.CreateStore(Origin, MS.OriginTLS);
+    if (MS.CompileKernel) {
+      IRB.CreateCall(MS.WarningFn, Origin);
+    } else {
+      if (MS.TrackOrigins) {
+        IRB.CreateStore(Origin, MS.OriginTLS);
+      }
+      IRB.CreateCall(MS.WarningFn, {});
     }
-    IRB.CreateCall(MS.WarningFn, {});
     IRB.CreateCall(MS.EmptyAsm, {});
     // FIXME: Insert UnreachableInst if !MS.Recover?
     // This may invalidate some of the following checks and needs to be done
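In C terms, the two reporting paths in insertWarningFn() differ only in how the origin reaches the runtime. A minimal sketch, with stand-in names since both hooks are really called __msan_warning (the userspace TLS slot __msan_origin_tls is MSan's existing global):

    #include <cstdint>

    extern "C" void __msan_warning_kernel(std::uint32_t origin); // KMSAN hook
    extern "C" void __msan_warning_userspace();                  // MSan hook
    extern "C" std::uint32_t __msan_origin_tls;                  // MSan TLS slot

    // What the instrumented code effectively does at a poisoned-value use.
    inline void report_umr(std::uint32_t origin, bool compile_kernel,
                           bool track_origins) {
      if (compile_kernel) {
        __msan_warning_kernel(origin);  // origin passed as an argument
      } else {
        if (track_origins)
          __msan_origin_tls = origin;   // origin parked in TLS first
        __msan_warning_userspace();     // hook takes no arguments
      }
    }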
@@ -971,7 +1132,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
       unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
+      if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
         Value *Fn = MS.MaybeWarningFn[SizeIndex];
         Value *ConvertedShadow2 =
             IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1001,6 +1162,29 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     LLVM_DEBUG(dbgs() << "DONE:\n" << F);
   }
 
+  BasicBlock *insertKmsanPrologue(Function &F) {
+    BasicBlock *ret =
+        SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI());
+    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+    Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
+    Constant *Zero = IRB.getInt32(0);
+    MS.ParamTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(0)}, "param_shadow");
+    MS.RetvalTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(1)}, "retval_shadow");
+    MS.VAArgTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(2)}, "va_arg_shadow");
+    MS.VAArgOriginTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(3)}, "va_arg_origin");
+    MS.VAArgOverflowSizeTLS = IRB.CreateGEP(
+        ContextState, {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
+    MS.ParamOriginTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(5)}, "param_origin");
+    MS.RetvalOriginTLS =
+        IRB.CreateGEP(ContextState, {Zero, IRB.getInt32(6)}, "retval_origin");
+    return ret;
+  }
+
   /// Add MemorySanitizer instrumentation to a function.
   bool runOnFunction() {
     // In the presence of unreachable blocks, we may see Phi nodes with
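The prologue inserted above amounts to one runtime call per function plus seven field addresses. A self-contained sketch of the C-level picture (the field names refer to the kmsan_context_state sketch earlier; the function name is hypothetical):

    // kmsan_context_state can stay incomplete here; the GEPs are shown as
    // field-address comments.
    extern "C" struct kmsan_context_state *__msan_get_context_state(void);

    void instrumented_function(void) {
      struct kmsan_context_state *cs = __msan_get_context_state();
      // The seven GEPs take the address of each field of *cs:
      //   param_shadow         = &cs->param_tls            (indices 0, 0)
      //   retval_shadow        = &cs->retval_tls           (indices 0, 1)
      //   va_arg_shadow        = &cs->va_arg_tls           (indices 0, 2)
      //   va_arg_origin        = &cs->va_arg_origin_tls    (indices 0, 3)
      //   va_arg_overflow_size = &cs->va_arg_overflow_size (indices 0, 4)
      //   param_origin         = &cs->param_origin_tls     (indices 0, 5)
      //   retval_origin        = &cs->retval_origin_tls    (indices 0, 6)
      // The original entry block continues as `entry.split`.
      (void)cs;
    }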
@@ -1149,12 +1333,40 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return std::make_pair(ShadowPtr, OriginPtr);
   }
 
+  std::pair<Value *, Value *>
+  getShadowOriginPtrKernel(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
+                           unsigned Alignment, bool isStore) {
+    Value *ShadowOriginPtrs;
+    const DataLayout &DL = F.getParent()->getDataLayout();
+    int Size = DL.getTypeStoreSize(ShadowTy);
+
+    Value *Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
+    Value *AddrCast =
+        IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
+    if (Getter) {
+      ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
+    } else {
+      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
+      ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
+                                                : MS.MsanMetadataPtrForLoadN,
+                                        {AddrCast, SizeVal});
+    }
+    Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
+    ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
+    Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
+
+    return std::make_pair(ShadowPtr, OriginPtr);
+  }
+
   std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                  Type *ShadowTy,
                                                  unsigned Alignment,
                                                  bool isStore) {
-    std::pair<Value *, Value *> ret =
-        getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
+    std::pair<Value *, Value *> ret;
+    if (MS.CompileKernel)
+      ret = getShadowOriginPtrKernel(Addr, IRB, ShadowTy, Alignment, isStore);
+    else
+      ret = getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
     return ret;
   }
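Putting getShadowOriginPtrKernel() together with the runtime sketch, an instrumented 4-byte store expands roughly as follows. This mirrors the Store4 test added later in this patch; the chain-origin branch is shortened and the function/parameter names are assumptions:

    #include <cstdint>

    extern "C" {
    struct shadow_origin_ptr { void *shadow; std::uint32_t *origin; };
    struct shadow_origin_ptr __msan_metadata_ptr_for_store_4(void *addr);
    std::uint32_t __msan_chain_origin(std::uint32_t origin);
    }

    // Rough C equivalent of an instrumented `*p = x` for a 4-byte x, whose
    // shadow/origin were propagated into x_shadow/x_origin by earlier code.
    void instrumented_store4(std::uint32_t *p, std::uint32_t x,
                             std::uint32_t x_shadow, std::uint32_t x_origin) {
      struct shadow_origin_ptr sop = __msan_metadata_ptr_for_store_4(p);
      *static_cast<std::uint32_t *>(sop.shadow) = x_shadow; // copy the shadow
      if (x_shadow)                                // possibly-uninit bits?
        *sop.origin = __msan_chain_origin(x_origin); // then record the origin
      *p = x;                                      // finally the real store
    }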
@@ -1173,7 +1385,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
   /// Compute the origin address for a given function argument.
   Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) {
-    if (!MS.TrackOrigins) return nullptr;
+    if (!MS.TrackOrigins)
+      return nullptr;
     Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
     if (ArgOffset)
       Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
@@ -1313,6 +1526,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
                                    /*isStore*/ true)
                     .first;
+            // TODO(glider): need to copy origins.
             if (Overflow) {
               // ParamTLS overflow.
               EntryIRB.CreateMemSet(
@@ -2931,12 +3145,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         if (ArgOffset + Size > kParamTLSSize) break;
         unsigned ParamAlignment = CS.getParamAlignment(i);
         unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
-        Value *AShadowPtr = getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
-                                               Alignment, /*isStore*/ false)
-                                .first;
+        Value *AShadowPtr =
+            getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
+                               /*isStore*/ false)
+                .first;
 
         Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                  Alignment, Size);
+        // TODO(glider): need to copy origins.
       } else {
         Size = DL.getTypeAllocSize(A->getType());
         if (ArgOffset + Size > kParamTLSSize) break;
@@ -3043,40 +3259,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
             "_msphi_o"));
   }
 
-  void visitAllocaInst(AllocaInst &I) {
-    setShadow(&I, getCleanShadow(&I));
-    setOrigin(&I, getCleanOrigin());
-    IRBuilder<> IRB(I.getNextNode());
-    const DataLayout &DL = F.getParent()->getDataLayout();
-    uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
-    Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
-    if (I.isArrayAllocation())
-      Len = IRB.CreateMul(Len, I.getArraySize());
+  Value *getLocalVarDescription(AllocaInst &I) {
+    SmallString<2048> StackDescriptionStorage;
+    raw_svector_ostream StackDescription(StackDescriptionStorage);
+    // We create a string with a description of the stack allocation and
+    // pass it into __msan_set_alloca_origin.
+    // It will be printed by the run-time if stack-originated UMR is found.
+    // The first 4 bytes of the string are set to '----' and will be replaced
+    // by the stack origin ID at the first call.
+    StackDescription << "----" << I.getName() << "@" << F.getName();
+    return createPrivateNonConstGlobalForString(*F.getParent(),
+                                                StackDescription.str());
+  }
 
+  void instrumentAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
     if (PoisonStack && ClPoisonStackWithCall) {
       IRB.CreateCall(MS.MsanPoisonStackFn,
                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
     } else {
-      Value *ShadowBase = getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(),
-                                             I.getAlignment(), /*isStore*/ true)
-                              .first;
+      Value *ShadowBase, *OriginBase;
+      std::tie(ShadowBase, OriginBase) =
+          getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(), 1, /*isStore*/ true);
 
       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
       IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlignment());
     }
 
     if (PoisonStack && MS.TrackOrigins) {
-      SmallString<2048> StackDescriptionStorage;
-      raw_svector_ostream StackDescription(StackDescriptionStorage);
-      // We create a string with a description of the stack allocation and
-      // pass it into __msan_set_alloca_origin.
-      // It will be printed by the run-time if stack-originated UMR is found.
-      // The first 4 bytes of the string are set to '----' and will be replaced
-      // by __msan_va_arg_overflow_size_tls at the first call.
-      StackDescription << "----" << I.getName() << "@" << F.getName();
-      Value *Descr =
-          createPrivateNonConstGlobalForString(*F.getParent(),
-                                               StackDescription.str());
-
+      Value *Descr = getLocalVarDescription(I);
       IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
                      {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
@@ -3084,6 +3294,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
+  void instrumentAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
+    Value *Descr = getLocalVarDescription(I);
+    if (PoisonStack) {
+      IRB.CreateCall(MS.MsanPoisonAllocaFn,
+                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
+                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
+    } else {
+      IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
+                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
+    }
+  }
+
+  void visitAllocaInst(AllocaInst &I) {
+    setShadow(&I, getCleanShadow(&I));
+    setOrigin(&I, getCleanOrigin());
+    IRBuilder<> IRB(I.getNextNode());
+    const DataLayout &DL = F.getParent()->getDataLayout();
+    uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
+    Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
+    if (I.isArrayAllocation())
+      Len = IRB.CreateMul(Len, I.getArraySize());
+
+    if (MS.CompileKernel)
+      instrumentAllocaKmsan(I, IRB, Len);
+    else
+      instrumentAllocaUserspace(I, IRB, Len);
+  }
+
   void visitSelectInst(SelectInst& I) {
     IRBuilder<> IRB(&I);
     // a = select b, c, d
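The alloca tests below exercise instrumentAllocaKmsan(). In C terms, a local such as `int a[5]` in a sanitize_memory function is bracketed like this (a sketch: the function name is hypothetical, the description string format comes from getLocalVarDescription(), and whether to poison is really a compile-time decision in the pass, shown as a parameter only for compactness):

    extern "C" {
    void __msan_poison_alloca(void *addr, unsigned long size, char *descr);
    void __msan_unpoison_alloca(void *addr, unsigned long size);
    }

    // "----<var>@<function>"; the runtime patches the first 4 bytes.
    static char descr_a[] = "----a@array_example";

    void array_example(bool poison_stack) {
      int a[5];
      if (poison_stack)
        __msan_poison_alloca(a, sizeof(a), descr_a); // 20 bytes, as in the test
      else
        __msan_unpoison_alloca(a, sizeof(a));
      // ... use of `a` ...
    }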
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" @@ -16,6 +17,7 @@ entry: ; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false) ; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4) ; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4, +; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4, ; CHECK: ret void @@ -31,6 +33,7 @@ l: ; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false) ; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4) ; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 4, +; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4, ; CHECK: ret void define void @array() sanitize_memory { @@ -43,6 +46,7 @@ entry: ; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 20, i1 false) ; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20) ; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 20, +; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 20, ; CHECK: ret void define void @array_non_const(i64 %cnt) sanitize_memory { @@ -56,4 +60,20 @@ entry: ; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false) ; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]]) ; ORIGIN: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 %[[A]], +; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 %[[A]], ; CHECK: ret void + +; Check that the local is unpoisoned in the absence of sanitize_memory +define void @unpoison_local() { +entry: + %x = alloca i32, i64 5, align 4 + ret void +} + +; CHECK-LABEL: define void @unpoison_local( +; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 20, i1 false) +; CALL: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 20, i1 false) +; ORIGIN-NOT: call void @__msan_set_alloca_origin4(i8* {{.*}}, i64 20, +; KMSAN: call void @__msan_unpoison_alloca(i8* {{.*}}, i64 20) +; CHECK: ret void + diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll new file mode 100644 index 00000000000..28bbf3cd708 --- /dev/null +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll @@ -0,0 +1,404 @@ +; KMSAN instrumentation tests +; RUN: opt < %s -msan -msan-kernel=1 -S | FileCheck %s -check-prefixes=CHECK + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; Check the instrumentation prologue. 
+define void @Empty() nounwind uwtable sanitize_memory {
+entry:
+  ret void
+}
+
+; CHECK-LABEL: @Empty
+; CHECK: entry:
+; CHECK: @__msan_get_context_state()
+; %param_shadow:
+; CHECK: getelementptr {{.*}} i32 0, i32 0
+; %retval_shadow:
+; CHECK: getelementptr {{.*}} i32 0, i32 1
+; %va_arg_shadow:
+; CHECK: getelementptr {{.*}} i32 0, i32 2
+; %va_arg_origin:
+; CHECK: getelementptr {{.*}} i32 0, i32 3
+; %va_arg_overflow_size:
+; CHECK: getelementptr {{.*}} i32 0, i32 4
+; %param_origin:
+; CHECK: getelementptr {{.*}} i32 0, i32 5
+; %retval_origin:
+; CHECK: getelementptr {{.*}} i32 0, i32 6
+; CHECK: entry.split:
+
+; Check instrumentation of stores
+
+define void @Store1(i8* nocapture %p, i8 %x) nounwind uwtable sanitize_memory {
+entry:
+  store i8 %x, i8* %p
+  ret void
+}
+
+; CHECK-LABEL: @Store1
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
+; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]]
+; Load the shadow of %p and check it
+; CHECK: load i64, i64* [[SHADOW]]
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; CHECK: @__msan_metadata_ptr_for_store_1(i8* %p)
+; CHECK: store i8
+; If the new shadow is non-zero, jump to __msan_chain_origin()
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+; CHECK: @__msan_chain_origin
+; Storing origin here:
+; CHECK: store i32
+; CHECK: br label
+; CHECK: <label>
+; CHECK: store i8
+; CHECK: ret void
+
+define void @Store2(i16* nocapture %p, i16 %x) nounwind uwtable sanitize_memory {
+entry:
+  store i16 %x, i16* %p
+  ret void
+}
+
+; CHECK-LABEL: @Store2
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; CHECK: [[REG:%[0-9]+]] = bitcast i16* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_store_2(i8* [[REG]])
+; CHECK: store i16
+; If the new shadow is non-zero, jump to __msan_chain_origin()
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+; CHECK: @__msan_chain_origin
+; Storing origin here:
+; CHECK: store i32
+; CHECK: br label
+; CHECK: <label>
+; CHECK: store i16
+; CHECK: ret void
+
+
+define void @Store4(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+entry:
+  store i32 %x, i32* %p
+  ret void
+}
+
+; CHECK-LABEL: @Store4
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i32
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; CHECK: [[REG:%[0-9]+]] = bitcast i32* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_store_4(i8* [[REG]])
+; CHECK: store i32
+; If the new shadow is non-zero, jump to __msan_chain_origin()
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+; CHECK: @__msan_chain_origin
+; Storing origin here:
+; CHECK: store i32
+; CHECK: br label
+; CHECK: <label>
+; CHECK: store i32
+; CHECK: ret void
+
+define void @Store8(i64* nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
+entry:
+  store i64 %x, i64* %p
+  ret void
+}
+
+; CHECK-LABEL: @Store8
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; CHECK: [[REG:%[0-9]+]] = bitcast i64* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_store_8(i8* [[REG]])
+; CHECK: store i64
+; If the new shadow is non-zero, jump to __msan_chain_origin()
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+; CHECK: @__msan_chain_origin
+; Storing origin here:
+; CHECK: store i32
+; CHECK: br label
+; CHECK: <label>
+; CHECK: store i64
+; CHECK: ret void
+
+define void @Store16(i128* nocapture %p, i128 %x) nounwind uwtable sanitize_memory {
+entry:
+  store i128 %x, i128* %p
+  ret void
+}
+
+; CHECK-LABEL: @Store16
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; CHECK: [[REG:%[0-9]+]] = bitcast i128* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_store_n(i8* [[REG]], i64 16)
+; CHECK: store i128
+; If the new shadow is non-zero, jump to __msan_chain_origin()
+; CHECK: icmp
+; CHECK: br i1
+; CHECK: <label>
+; CHECK: @__msan_chain_origin
+; Storing origin here:
+; CHECK: store i32
+; CHECK: br label
+; CHECK: <label>
+; CHECK: store i128
+; CHECK: ret void
+
+
+; Check instrumentation of loads
+
+define i8 @Load1(i8* nocapture %p) nounwind uwtable sanitize_memory {
+entry:
+  %0 = load i8, i8* %p
+  ret i8 %0
+}
+
+; CHECK-LABEL: @Load1
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; Load the value from %p. This is done before accessing the shadow
+; to ease atomic handling.
+; CHECK: load i8
+; CHECK: @__msan_metadata_ptr_for_load_1(i8* %p)
+; Load the shadow and origin.
+; CHECK: load i8
+; CHECK: load i32
+
+
+define i16 @Load2(i16* nocapture %p) nounwind uwtable sanitize_memory {
+entry:
+  %0 = load i16, i16* %p
+  ret i16 %0
+}
+
+; CHECK-LABEL: @Load2
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; Load the value from %p. This is done before accessing the shadow
+; to ease atomic handling.
+; CHECK: load i16
+; CHECK: [[REG:%[0-9]+]] = bitcast i16* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_load_2(i8* [[REG]])
+; Load the shadow and origin.
+; CHECK: load i16
+; CHECK: load i32
+
+
+define i32 @Load4(i32* nocapture %p) nounwind uwtable sanitize_memory {
+entry:
+  %0 = load i32, i32* %p
+  ret i32 %0
+}
+
+; CHECK-LABEL: @Load4
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; Load the value from %p. This is done before accessing the shadow
+; to ease atomic handling.
+; CHECK: load i32
+; CHECK: [[REG:%[0-9]+]] = bitcast i32* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_load_4(i8* [[REG]])
+; Load the shadow and origin.
+; CHECK: load i32
+; CHECK: load i32
+
+define i64 @Load8(i64* nocapture %p) nounwind uwtable sanitize_memory {
+entry:
+  %0 = load i64, i64* %p
+  ret i64 %0
+}
+
+; CHECK-LABEL: @Load8
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; Load the value from %p. This is done before accessing the shadow
+; to ease atomic handling.
+; CHECK: load i64
+; CHECK: [[REG:%[0-9]+]] = bitcast i64* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_load_8(i8* [[REG]])
+; Load the shadow and origin.
+; CHECK: load i64
+; CHECK: load i32
+
+define i128 @Load16(i128* nocapture %p) nounwind uwtable sanitize_memory {
+entry:
+  %0 = load i128, i128* %p
+  ret i128 %0
+}
+
+; CHECK-LABEL: @Load16
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK-LABEL: entry.split:
+; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]]
+; Load the shadow of %p and check it
+; CHECK: load i64
+; CHECK: icmp
+; CHECK: br i1
+; CHECK-LABEL: <label>
+; Load the value from %p. This is done before accessing the shadow
+; to ease atomic handling.
+; CHECK: load i128
+; CHECK: [[REG:%[0-9]+]] = bitcast i128* %p to i8*
+; CHECK: @__msan_metadata_ptr_for_load_n(i8* [[REG]], i64 16)
+; Load the shadow and origin.
+; CHECK: load i128
+; CHECK: load i32
+
+
+; Test kernel-specific va_list instrumentation
+
+%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_end(i8*)
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+declare dso_local i32 @VAListFn(i8*, %struct.__va_list_tag*) local_unnamed_addr
+
+; Function Attrs: nounwind uwtable
+define dso_local i32 @VarArgFn(i8* %fmt, ...) local_unnamed_addr sanitize_memory #0 {
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  %0 = bitcast [1 x %struct.__va_list_tag]* %args to i8*
+  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %args, i64 0, i64 0
+  call void @llvm.va_start(i8* nonnull %0)
+  %call = call i32 @VAListFn(i8* %fmt, %struct.__va_list_tag* nonnull %arraydecay)
+  call void @llvm.va_end(i8* nonnull %0)
+  ret i32 %call
+}
+
+; Kernel is built without SSE support.
+attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
+
+; CHECK-LABEL: @VarArgFn
+; CHECK: @__msan_get_context_state()
+; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2
+; CHECK: [[VA_ARG_ORIGIN:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 3
+; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
+
+; CHECK-LABEL: entry.split:
+; CHECK: [[OSIZE:%[0-9]+]] = load i64, i64* [[VA_ARG_OVERFLOW_SIZE]]
+; Register save area is 48 bytes for non-SSE builds.
+; CHECK: [[SIZE:%[0-9]+]] = add i64 48, [[OSIZE]]
+; CHECK: [[SHADOWS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
+; CHECK: [[VA_ARG_SHADOW]]
+; CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[SHADOWS]], {{.*}}, i64 [[SIZE]]
+; CHECK: [[ORIGINS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
+; CHECK: [[VA_ARG_ORIGIN]]
+; CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[ORIGINS]], {{.*}}, i64 [[SIZE]]
+; CHECK: call i32 @VAListFn
+
+; Function Attrs: nounwind uwtable
+define dso_local void @VarArgCaller() local_unnamed_addr sanitize_memory {
+entry:
+  %call = tail call i32 (i8*, ...) @VarArgFn(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 123)
+  ret void
+}
+
+; CHECK-LABEL: @VarArgCaller
+
+; CHECK-LABEL: entry:
+; CHECK: @__msan_get_context_state()
+; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
+; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2
+; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
+
+; CHECK-LABEL: entry.split:
+; CHECK: [[PARAM_SI:%[_a-z0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
+; CHECK: [[ARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[PARAM_SI]] to i64*
+; First argument is initialized
+; CHECK: store i64 0, i64* [[ARG1_S]]
+
+; Dangling cast of va_arg_shadow[0], unused because the first argument is fixed.
+; CHECK: [[VA_CAST0:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
+
+; CHECK: [[VA_CAST1:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
+; CHECK: [[ARG1_SI:%[_a-z0-9]+]] = add i64 [[VA_CAST1]], 8
+; CHECK: [[PARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[ARG1_SI]] to i32*
+
+; Shadow for 123 is 0.
+; CHECK: store i32 0, i32* [[PARG1_S]]
+
+; CHECK: store i64 0, i64* [[VA_ARG_OVERFLOW_SIZE]]
+; CHECK: call i32 (i8*, ...) @VarArgFn({{.*}} @.str{{.*}} i32 123)
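The VarArgFn checks above boil down to snapshotting the va_arg metadata before the function body can clobber the per-task TLS area. A minimal self-contained sketch in C terms, repeating only the fields this path needs (layout and the 800-byte sizes as in the earlier kmsan_context_state sketch; 48 is the x86_64 register save area without SSE, as the test notes; bounds are unchecked here just as in the sketch):

    #include <cstdint>
    #include <cstring>

    struct kmsan_context_state {
      std::uint64_t param_tls[100], retval_tls[100];
      std::uint64_t va_arg_tls[100], va_arg_origin_tls[100];
      std::uint64_t va_arg_overflow_size;
      std::uint32_t param_origin_tls[200], retval_origin_tls, origin_tls;
    };
    extern "C" kmsan_context_state *__msan_get_context_state(void);

    // What the prologue of a variadic function like @VarArgFn effectively does.
    void snapshot_va_arg_metadata(void) {
      kmsan_context_state *cs = __msan_get_context_state();
      std::uint64_t size = 48 + cs->va_arg_overflow_size;
      // The pass emits dynamic allocas of `size` bytes; fixed buffers here.
      std::uint8_t shadow_copy[48 + 800], origin_copy[48 + 800];
      std::memcpy(shadow_copy, cs->va_arg_tls, size);
      std::memcpy(origin_copy, cs->va_arg_origin_tls, size);
      // va_arg accesses in the body then consult these copies.
    }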
diff --git a/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll b/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
index cdf9c5479e7..70722c63f29 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
@@ -1,5 +1,6 @@
-; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS1 %s
-; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS2 %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS1 %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefixes=CHECK,CHECK-MSAN,CHECK-ORIGINS2 %s
+; RUN: opt < %s -msan -msan-kernel=1 -msan-check-access-address=0 -S | FileCheck -check-prefixes=CHECK,CHECK-KMSAN,CHECK-ORIGINS2 %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -52,9 +53,17 @@ attributes #1 = { nounwind readnone }
 
 ; CHECK-LABEL: @Store
 
-; CHECK: load {{.*}} @__msan_param_tls
-; CHECK: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
-; CHECK: store {{.*}}!dbg ![[DBG:[01-9]+]]
+
+; CHECK-MSAN: load {{.*}} @__msan_param_tls
+; CHECK-MSAN: [[ORIGIN:%[0-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+
+; CHECK-KMSAN-LABEL: entry.split:
+; CHECK-KMSAN: %param_shadow
+; CHECK-KMSAN: load i32, i32*
+; CHECK-KMSAN: %param_origin
+; CHECK-KMSAN: [[ORIGIN:%[0-9a-z]+]] = load i32, i32*
+
+; CHECK: store {{.*}}!dbg ![[DBG:[0-9]+]]
 ; CHECK: icmp
 ; CHECK: br i1
 ; CHECK: <label>
@@ -63,7 +72,7 @@ attributes #1 = { nounwind readnone }
 ; CHECK-ORIGINS1: store i32 {{.*}}[[ORIGIN]],{{.*}}!dbg !{{.*}}[[DBG]]
 
 ; Origin tracking level 2: pass origin value through __msan_chain_origin and store the result.
-; CHECK-ORIGINS2: [[ORIGIN2:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 {{.*}}[[ORIGIN]])
+; CHECK-ORIGINS2: [[ORIGIN2:%[0-9a-z]+]] = call i32 @__msan_chain_origin(i32 {{.*}}[[ORIGIN]])
 ; CHECK-ORIGINS2: store i32 {{.*}}[[ORIGIN2]],{{.*}}!dbg !{{.*}}[[DBG]]
 
 ; CHECK: br label{{.*}}!dbg !{{.*}}[[DBG]]