//===- HWAddressSanitizer.cpp - detector of memory access errors ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, an address sanity checker
/// based on tagged addressing.
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace llvm;
#define DEBUG_TYPE "hwasan"
static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";
static const char *const kHwasanShadowMemoryDynamicAddress =
"__hwasan_shadow_memory_dynamic_address";
// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
std::numeric_limits<uint64_t>::max();
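// The pointer tag occupies the top byte of a 64-bit pointer (bits 56..63).
// E.g. tagging 0x00007fffdeadbeef with tag 0x2a yields 0x2a007fffdeadbeef;
// on AArch64, top-byte-ignore (TBI) lets such pointers be dereferenced
// without stripping the tag first.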
static const unsigned kPointerTagShift = 56;
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
"hwasan-memory-access-callback-prefix",
cl::desc("Prefix for memory access callbacks"), cl::Hidden,
cl::init("__hwasan_"));
static cl::opt<bool>
ClInstrumentWithCalls("hwasan-instrument-with-calls",
cl::desc("instrument reads and writes with callbacks"),
cl::Hidden, cl::init(false));
static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
cl::desc("instrument read instructions"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
"hwasan-instrument-writes", cl::desc("instrument write instructions"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
"hwasan-instrument-atomics",
cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
cl::init(true));
static cl::opt<bool> ClRecover(
"hwasan-recover",
cl::desc("Enable recovery mode (continue-after-error)."),
cl::Hidden, cl::init(false));
static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
cl::desc("instrument stack (allocas)"),
cl::Hidden, cl::init(true));
static cl::opt<bool> ClGenerateTagsWithCalls(
"hwasan-generate-tags-with-calls",
cl::desc("generate new tags with runtime library calls"), cl::Hidden,
cl::init(false));
static cl::opt<int> ClMatchAllTag(
"hwasan-match-all-tag",
cl::desc("don't report bad accesses via pointers with this tag"),
cl::Hidden, cl::init(-1));
static cl::opt<bool> ClEnableKhwasan(
"hwasan-kernel",
cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
cl::Hidden, cl::init(false));
// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
// Shadow = (Mem >> scale) + offset
static cl::opt<unsigned long long> ClMappingOffset(
"hwasan-mapping-offset",
cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
cl::init(0));
namespace {
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer : public FunctionPass {
public:
// Pass identification, replacement for typeid.
static char ID;
explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
: FunctionPass(ID) {
this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
ClEnableKhwasan : CompileKernel;
}
StringRef getPassName() const override { return "HWAddressSanitizer"; }
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
void initializeCallbacks(Module &M);
void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
void untagPointerOperand(Instruction *I, Value *Addr);
Value *memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB);
void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
unsigned AccessSizeIndex,
Instruction *InsertBefore);
bool instrumentMemAccess(Instruction *I);
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
uint64_t *TypeSize, unsigned *Alignment,
Value **MaybeMask);
bool isInterestingAlloca(const AllocaInst &AI);
bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
SmallVectorImpl<Instruction *> &RetVec);
Value *getNextTagWithCall(IRBuilder<> &IRB);
Value *getStackBaseTag(IRBuilder<> &IRB);
Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
unsigned AllocaNo);
Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);
private:
LLVMContext *C;
Triple TargetTriple;
/// This struct defines the shadow mapping using the rule:
/// shadow = (mem >> Scale) + Offset.
/// If InGlobal is true, then
/// extern char __hwasan_shadow[];
/// shadow = (mem >> Scale) + &__hwasan_shadow
struct ShadowMapping {
int Scale;
uint64_t Offset;
bool InGlobal;
void init(Triple &TargetTriple);
unsigned getAllocaAlignment() const { return 1U << Scale; }
};
ShadowMapping Mapping;
Type *IntptrTy;
Type *Int8Ty;
bool CompileKernel;
bool Recover;
Function *HwasanCtorFunction;
Function *HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
Function *HwasanMemoryAccessCallbackSized[2];
Function *HwasanTagMemoryFunc;
Function *HwasanGenerateTagFunc;
Constant *ShadowGlobal;
Value *LocalDynamicShadow = nullptr;
};
} // end anonymous namespace
char HWAddressSanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(
HWAddressSanitizer, "hwasan",
"HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
false)
INITIALIZE_PASS_END(
HWAddressSanitizer, "hwasan",
"HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
false)
FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
bool Recover) {
assert(!CompileKernel || Recover);
return new HWAddressSanitizer(CompileKernel, Recover);
}
/// Module-level initialization.
///
/// Inserts a call to __hwasan_init into the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
auto &DL = M.getDataLayout();
TargetTriple = Triple(M.getTargetTriple());
Mapping.init(TargetTriple);
C = &(M.getContext());
IRBuilder<> IRB(*C);
IntptrTy = IRB.getIntPtrTy(DL);
Int8Ty = IRB.getInt8Ty();
HwasanCtorFunction = nullptr;
if (!CompileKernel) {
std::tie(HwasanCtorFunction, std::ignore) =
createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
kHwasanInitName,
/*InitArgTypes=*/{},
/*InitArgs=*/{});
appendToGlobalCtors(M, HwasanCtorFunction, 0);
}
return true;
}
void HWAddressSanitizer::initializeCallbacks(Module &M) {
IRBuilder<> IRB(*C);
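// Declare one callback per access kind and size, e.g. __hwasan_load1 ..
// __hwasan_load16 and __hwasan_store1 .. __hwasan_store16, plus the sized
// variants __hwasan_loadN / __hwasan_storeN; recovery builds append
// "_noabort" to each name.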
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string EndingStr = Recover ? "_noabort" : "";
HwasanMemoryAccessCallbackSized[AccessIsWrite] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
ClMemoryAccessCallbackPrefix + TypeStr +
itostr(1ULL << AccessSizeIndex) + EndingStr,
FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)));
}
}
HwasanTagMemoryFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
"__hwasan_tag_memory", IRB.getVoidTy(), IntptrTy, Int8Ty, IntptrTy));
HwasanGenerateTagFunc = checkSanitizerInterfaceFunction(
M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty));
if (Mapping.InGlobal)
ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
ArrayType::get(IRB.getInt8Ty(), 0));
}
void HWAddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
// Generate code only when dynamic addressing is needed.
if (Mapping.Offset != kDynamicShadowSentinel)
return;
IRBuilder<> IRB(&F.front().front());
if (Mapping.InGlobal) {
// An empty inline asm with input reg == output reg.
// An opaque pointer-to-int cast, basically.
InlineAsm *Asm = InlineAsm::get(
FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
StringRef(""), StringRef("=r,0"),
/*hasSideEffects=*/false);
LocalDynamicShadow = IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
} else {
Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
kHwasanShadowMemoryDynamicAddress, IntptrTy);
LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
}
}
Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
bool *IsWrite,
uint64_t *TypeSize,
unsigned *Alignment,
Value **MaybeMask) {
// Skip memory accesses inserted by another instrumentation.
if (I->getMetadata("nosanitize")) return nullptr;
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
return nullptr;
Value *PtrOperand = nullptr;
const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
*TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
*Alignment = LI->getAlignment();
PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*Alignment = SI->getAlignment();
PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*Alignment = 0;
PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*Alignment = 0;
PtrOperand = XCHG->getPointerOperand();
}
if (PtrOperand) {
// Do not instrument accesses from different address spaces; we cannot deal
// with them.
Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0)
return nullptr;
// Ignore swifterror addresses.
// swifterror memory addresses are mem2reg promoted by instruction
// selection. As such they cannot have regular uses like an instrumentation
// function and it makes no sense to track them as memory.
if (PtrOperand->isSwiftError())
return nullptr;
}
return PtrOperand;
}
static unsigned getPointerOperandIndex(Instruction *I) {
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return LI->getPointerOperandIndex();
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperandIndex();
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
return RMW->getPointerOperandIndex();
if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
return XCHG->getPointerOperandIndex();
report_fatal_error("Unexpected instruction");
return -1;
}
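// Map an access size in bits to its callback index:
// 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4.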
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
size_t Res = countTrailingZeros(TypeSize / 8);
assert(Res < kNumberOfAccessSizes);
return Res;
}
void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
if (TargetTriple.isAArch64())
return;
IRBuilder<> IRB(I);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
Value *UntaggedPtr =
IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}
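// Translate an application address to its shadow address. With the default
// Scale of 4, one shadow byte covers a 16-byte granule, e.g.
// memToShadow(0x1230) == (0x1230 >> 4) + Offset == 0x123 + Offset.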
Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
// Mem >> Scale
Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
if (Mapping.Offset == 0)
return Shadow;
// (Mem >> Scale) + Offset
Value *ShadowBase;
if (LocalDynamicShadow)
ShadowBase = LocalDynamicShadow;
else
ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
return IRB.CreateAdd(Shadow, ShadowBase);
}
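// Emit an inline tag check; the generated IR is roughly:
//   %ptrtag   = trunc i64 (lshr i64 %ptr, 56) to i8
//   %memtag   = load i8, i8* %shadow
//   %mismatch = icmp ne i8 %ptrtag, %memtag
//   br i1 %mismatch, label %trap, label %cont  ; branch weighted as unlikely
// where %trap raises a target-specific trap encoding the access parameters.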
void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
unsigned AccessSizeIndex,
Instruction *InsertBefore) {
IRBuilder<> IRB(InsertBefore);
Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
IRB.getInt8Ty());
Value *AddrLong = untagPointer(IRB, PtrLong);
Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
Value *MemTag =
IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, IRB.getInt8PtrTy()));
Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);
int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
ClMatchAllTag : (CompileKernel ? 0xFF : -1);
if (matchAllTag != -1) {
Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
ConstantInt::get(PtrTag->getType(), matchAllTag));
TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
}
TerminatorInst *CheckTerm =
SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
MDBuilder(*C).createBranchWeights(1, 100000));
IRB.SetInsertPoint(CheckTerm);
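// Pack the access parameters into one byte: bit 5 = recover, bit 4 = write,
// bits 0..3 = log2(access size). The value is embedded in the trap
// instruction's immediate (the brk operand on AArch64, the nopl displacement
// on x86_64) so the handler can decode it.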
const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
InlineAsm *Asm;
switch (TargetTriple.getArch()) {
case Triple::x86_64:
// The signal handler will find the data address in rdi.
Asm = InlineAsm::get(
FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
"int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
"{rdi}",
/*hasSideEffects=*/true);
break;
case Triple::aarch64:
case Triple::aarch64_be:
// The signal handler will find the data address in x0.
Asm = InlineAsm::get(
FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
"brk #" + itostr(0x900 + AccessInfo),
"{x0}",
/*hasSideEffects=*/true);
break;
default:
report_fatal_error("unsupported architecture");
}
IRB.CreateCall(Asm, PtrLong);
}
bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
bool IsWrite = false;
unsigned Alignment = 0;
uint64_t TypeSize = 0;
Value *MaybeMask = nullptr;
Value *Addr =
isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
if (!Addr)
return false;
if (MaybeMask)
return false; // FIXME: handle masked memory accesses.
IRBuilder<> IRB(I);
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
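// Fast path: power-of-two accesses up to 16 bytes with adequate alignment
// get a fixed-size check, inline or via callback; anything else falls back
// to the sized __hwasan_{load,store}N callback.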
if (isPowerOf2_64(TypeSize) &&
(TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
(Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
Alignment >= TypeSize / 8)) {
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (ClInstrumentWithCalls) {
IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
AddrLong);
} else {
instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
}
} else {
IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
{AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
}
untagPointerOperand(I, Addr);
return true;
}
static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
uint64_t ArraySize = 1;
if (AI.isArrayAllocation()) {
const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
assert(CI && "non-constant array size");
ArraySize = CI->getZExtValue();
}
Type *Ty = AI.getAllocatedType();
uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
return SizeInBytes * ArraySize;
}
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
Value *Tag) {
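// Round the size up to a whole number of shadow granules. E.g. with the
// default 16-byte alloca alignment, a 20-byte alloca tags 32 bytes of
// memory, i.e. two shadow bytes.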
size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
~(Mapping.getAllocaAlignment() - 1);
Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
if (ClInstrumentWithCalls) {
IRB.CreateCall(HwasanTagMemoryFunc,
{IRB.CreatePointerCast(AI, IntptrTy), JustTag,
ConstantInt::get(IntptrTy, Size)});
} else {
size_t ShadowSize = Size >> Mapping.Scale;
Value *ShadowPtr = IRB.CreateIntToPtr(
memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
IRB.getInt8PtrTy());
// If this memset is not inlined, it will be intercepted in the hwasan
// runtime library. That's OK, because the interceptor skips the checks if
// the address is in the shadow region.
// FIXME: the interceptor is not as fast as a real memset. Consider lowering
// llvm.memset right here into either a sequence of stores, or a call to
// __hwasan_tag_memory.
IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
}
return true;
}
static unsigned RetagMask(unsigned AllocaNo) {
// A list of 8-bit numbers that have at most one run of non-zero bits.
// x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
// masks.
// The list does not include the value 255, which is used for UAR.
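// E.g. RetagMask(5) == 6 (0b00000110): one contiguous run of set bits, so
// 6 << 56 is encodable as an AArch64 logical immediate.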
static unsigned FastMasks[] = {
0, 1, 2, 3, 4, 6, 7, 8, 12, 14, 15, 16, 24,
28, 30, 31, 32, 48, 56, 60, 62, 63, 64, 96, 112, 120,
124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}
Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
if (ClGenerateTagsWithCalls)
return nullptr;
// FIXME: use addressofreturnaddress (but implement it in aarch64 backend
// first).
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
auto GetStackPointerFn =
Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
Value *StackPointer = IRB.CreateCall(
GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});
// Extract some entropy from the stack pointer for the tags.
// Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
// between functions).
Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
Value *StackTag =
IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
"hwasan.stack.base.tag");
return StackTag;
}
Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
AllocaInst *AI, unsigned AllocaNo) {
if (ClGenerateTagsWithCalls)
return getNextTagWithCall(IRB);
return IRB.CreateXor(StackTag,
ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
}
Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
if (ClGenerateTagsWithCalls)
return getNextTagWithCall(IRB);
return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
}
// Add a tag to an address.
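// E.g. in userspace, OR-ing (0x2a << 56) into 0x00007fffdeadbeef gives
// 0x2a007fffdeadbeef. Kernel pointers carry an all-ones top byte, so the tag
// is merged with AND instead, which replaces 0xFF with the tag while keeping
// the low 56 bits intact.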
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
Value *PtrLong, Value *Tag) {
Value *TaggedPtrLong;
if (CompileKernel) {
// Kernel addresses have 0xFF in the most significant byte.
Value *ShiftedTag = IRB.CreateOr(
IRB.CreateShl(Tag, kPointerTagShift),
ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
} else {
// Userspace can simply OR in (tag << 56).
Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
}
return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}
// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
Value *UntaggedPtrLong;
if (CompileKernel) {
// Kernel addresses have 0xFF in the most significant byte.
UntaggedPtrLong = IRB.CreateOr(PtrLong,
ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
} else {
// Userspace addresses have 0x00.
UntaggedPtrLong = IRB.CreateAnd(PtrLong,
ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
}
return UntaggedPtrLong;
}
bool HWAddressSanitizer::instrumentStack(
SmallVectorImpl<AllocaInst *> &Allocas,
SmallVectorImpl<Instruction *> &RetVec) {
Function *F = Allocas[0]->getParent()->getParent();
Instruction *InsertPt = &*F->getEntryBlock().begin();
IRBuilder<> IRB(InsertPt);
Value *StackTag = getStackBaseTag(IRB);
// Ideally, we want to calculate a tagged stack base pointer and rewrite all
// alloca addresses using that. Unfortunately, offsets are not known yet
// (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
// temp, shift-OR it into each alloca address and xor with the retag mask.
// This generates one extra instruction per alloca use.
for (unsigned N = 0; N < Allocas.size(); ++N) {
auto *AI = Allocas[N];
IRB.SetInsertPoint(AI->getNextNode());
// Replace uses of the alloca with tagged address.
Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
std::string Name =
AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
Replacement->setName(Name + ".hwasan");
for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
Use &U = *UI++;
if (U.getUser() != AILong)
U.set(Replacement);
}
tagAlloca(IRB, AI, Tag);
for (auto RI : RetVec) {
IRB.SetInsertPoint(RI);
// Re-tag alloca memory with the special UAR tag.
Value *Tag = getUARTag(IRB, StackTag);
tagAlloca(IRB, AI, Tag);
}
}
return true;
}
bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
return (AI.getAllocatedType()->isSized() &&
// FIXME: instrument dynamic allocas, too
AI.isStaticAlloca() &&
// alloca() may be called with 0 size, ignore it.
getAllocaSizeInBytes(AI) > 0 &&
// We are only interested in allocas not promotable to registers.
// Promotable allocas are common under -O0.
!isAllocaPromotable(&AI) &&
// inalloca allocas are not treated as static, and we don't want
// dynamic alloca instrumentation for them as well.
!AI.isUsedWithInAlloca() &&
// swifterror allocas are register promoted by ISel
!AI.isSwiftError());
}
bool HWAddressSanitizer::runOnFunction(Function &F) {
if (&F == HwasanCtorFunction)
return false;
if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
return false;
LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
initializeCallbacks(*F.getParent());
assert(!LocalDynamicShadow);
maybeInsertDynamicShadowAtFunctionEntry(F);
bool Changed = false;
SmallVector<Instruction*, 16> ToInstrument;
SmallVector<AllocaInst*, 8> AllocasToInstrument;
SmallVector<Instruction*, 8> RetVec;
for (auto &BB : F) {
for (auto &Inst : BB) {
if (ClInstrumentStack)
if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
// Realign all allocas. We don't want small uninteresting allocas to
// hide in an instrumented alloca's padding.
if (AI->getAlignment() < Mapping.getAllocaAlignment())
AI->setAlignment(Mapping.getAllocaAlignment());
// Instrument some of them.
if (isInterestingAlloca(*AI))
AllocasToInstrument.push_back(AI);
continue;
}
if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
isa<CleanupReturnInst>(Inst))
RetVec.push_back(&Inst);
Value *MaybeMask = nullptr;
bool IsWrite;
unsigned Alignment;
uint64_t TypeSize;
Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
&Alignment, &MaybeMask);
if (Addr || isa<MemIntrinsic>(Inst))
ToInstrument.push_back(&Inst);
}
}
if (!AllocasToInstrument.empty())
Changed |= instrumentStack(AllocasToInstrument, RetVec);
for (auto Inst : ToInstrument)
Changed |= instrumentMemAccess(Inst);
LocalDynamicShadow = nullptr;
return Changed;
}
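// Pick how shadow memory is addressed: Offset == 0 gives the fixed linear
// mapping Shadow = Mem >> Scale (kernel, callback-based builds, and targets
// without ifunc support); kDynamicShadowSentinel defers the shadow base to
// function entry, where it is read via the __hwasan_shadow ifunc on
// Android >= 21.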
void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
const bool IsAndroid = TargetTriple.isAndroid();
const bool IsAndroidWithIfuncSupport =
IsAndroid && !TargetTriple.isAndroidVersionLT(21);
Scale = kDefaultShadowScale;
if (ClEnableKhwasan || ClInstrumentWithCalls || !IsAndroidWithIfuncSupport)
Offset = 0;
else
Offset = kDynamicShadowSentinel;
if (ClMappingOffset.getNumOccurrences() > 0)
Offset = ClMappingOffset;
InGlobal = IsAndroidWithIfuncSupport;
}