summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib')
-rw-r--r-- llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 57
1 files changed, 50 insertions, 7 deletions
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index f43245109d9..07e63b2a7df 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -67,6 +67,7 @@ using namespace llvm;
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
+static const uint64_t kDynamicShadowSentinel = ~(uint64_t)0;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kIOSShadowOffset64 = 0x120200000;
static const uint64_t kIOSSimShadowOffset32 = 1ULL << 30;
@@ -81,8 +82,8 @@ static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
-// TODO(wwchrome): Experimental for asan Win64, may change.
-static const uint64_t kWindowsShadowOffset64 = 0x1ULL << 45; // 32TB.
+// The shadow memory space is dynamically allocated.
+static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
@@ -125,6 +126,9 @@ static const char *const kAsanGlobalsRegisteredFlagName =
static const char *const kAsanOptionDetectUseAfterReturn =
"__asan_option_detect_stack_use_after_return";
+static const char *const kAsanShadowMemoryDynamicAddress =
+ "__asan_shadow_memory_dynamic_address";
+
static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
@@ -157,6 +161,11 @@ static cl::opt<bool> ClAlwaysSlowPath(
"asan-always-slow-path",
cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
cl::init(false));
+static cl::opt<bool> ClForceDynamicShadow(
+ "asan-force-dynamic-shadow",
+ cl::desc("Load shadow address into a local variable for each function"),
+ cl::Hidden, cl::init(false));
+
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary
@@ -432,6 +441,10 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
Mapping.Offset = kDefaultShadowOffset64;
}
+ if (ClForceDynamicShadow) {
+ Mapping.Offset = kDynamicShadowSentinel;
+ }
+
Mapping.Scale = kDefaultShadowScale;
if (ClMappingScale.getNumOccurrences() > 0) {
Mapping.Scale = ClMappingScale;
@@ -447,7 +460,8 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
// we could OR the constant in a single instruction, but it's more
// efficient to load it once and use indexed addressing.
Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ
- && !(Mapping.Offset & (Mapping.Offset - 1));
+ && !(Mapping.Offset & (Mapping.Offset - 1))
+ && Mapping.Offset != kDynamicShadowSentinel;
return Mapping;
}
@@ -464,7 +478,8 @@ struct AddressSanitizer : public FunctionPass {
bool UseAfterScope = false)
: FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan),
Recover(Recover || ClRecover),
- UseAfterScope(UseAfterScope || ClUseAfterScope) {
+ UseAfterScope(UseAfterScope || ClUseAfterScope),
+ LocalDynamicShadow(nullptr) {
initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
}
const char *getPassName() const override {
@@ -512,6 +527,7 @@ struct AddressSanitizer : public FunctionPass {
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool runOnFunction(Function &F) override;
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
+ void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void markEscapedLocalAllocas(Function &F);
bool doInitialization(Module &M) override;
bool doFinalization(Module &M) override;
@@ -533,8 +549,12 @@ struct AddressSanitizer : public FunctionPass {
FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
assert(Pass->ProcessedAllocas.empty() &&
"last pass forgot to clear cache");
+ assert(!Pass->LocalDynamicShadow);
+ }
+ ~FunctionStateRAII() {
+ Pass->LocalDynamicShadow = nullptr;
+ Pass->ProcessedAllocas.clear();
}
- ~FunctionStateRAII() { Pass->ProcessedAllocas.clear(); }
};
LLVMContext *C;
@@ -558,6 +578,7 @@ struct AddressSanitizer : public FunctionPass {
Function *AsanMemoryAccessCallbackSized[2][2];
Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
InlineAsm *EmptyAsm;
+ Value *LocalDynamicShadow;
GlobalsMetadata GlobalsMD;
DenseMap<const AllocaInst *, bool> ProcessedAllocas;
@@ -921,10 +942,15 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
if (Mapping.Offset == 0) return Shadow;
// (Shadow >> scale) | offset
+ Value *ShadowBase;
+ if (LocalDynamicShadow)
+ ShadowBase = LocalDynamicShadow;
+ else
+ ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
if (Mapping.OrShadowOffset)
- return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
+ return IRB.CreateOr(Shadow, ShadowBase);
else
- return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
+ return IRB.CreateAdd(Shadow, ShadowBase);
}
// Instrument memset/memmove/memcpy
@@ -977,6 +1003,10 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
// Skip memory accesses inserted by another instrumentation.
if (I->getMetadata("nosanitize")) return nullptr;
+ // Do not instrument the load fetching the dynamic shadow address.
+ if (LocalDynamicShadow == I)
+ return nullptr;
+
Value *PtrOperand = nullptr;
const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
@@ -1781,6 +1811,17 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
return false;
}
+void AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
+ // Generate code only when dynamic addressing is needed.
+ if (Mapping.Offset != kDynamicShadowSentinel)
+ return;
+
+ IRBuilder<> IRB(&F.front().front());
+ Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
+ kAsanShadowMemoryDynamicAddress, IntptrTy);
+ LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
+}
+
void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
// Find the one possible call to llvm.localescape and pre-mark allocas passed
// to it as uninteresting. This assumes we haven't started processing allocas
@@ -1833,6 +1874,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
FunctionStateRAII CleanupObj(this);
+ maybeInsertDynamicShadowAtFunctionEntry(F);
+
// We can't instrument allocas used with llvm.localescape. Only static allocas
// can be passed to that intrinsic.
markEscapedLocalAllocas(F);
OpenPOWER on IntegriCloud