author    Kuba Brecka <kuba.brecka@gmail.com>  2016-11-07 19:09:56 +0000
committer Kuba Brecka <kuba.brecka@gmail.com>  2016-11-07 19:09:56 +0000
commit    44e875ad5b2ce26826dd53f9e7d1a71436c86212 (patch)
tree      3c8e1a9bd8db587858b8f30033a92cbb913fb139
parent    f530e8b3f09dc89d8b0c9952abdbe7667e65bfcd (diff)
[tsan] Cast floating-point types correctly when instrumenting atomic accesses, LLVM part
Although rare, atomic accesses to floating-point types are valid, e.g. `%a = load atomic float ...`. The TSan instrumentation pass, however, tries to emit an inttoptr cast, which is incorrect; a bitcast should be used here. IRBuilder already provides a convenient helper function for exactly this.

Differential Revision: https://reviews.llvm.org/D26266

llvm-svn: 286135
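For context, a minimal sketch (not part of this patch) of the cast-selection behavior that IRBuilder::CreateBitOrPointerCast roughly provides; the helper name bitOrPointerCastSketch is illustrative only. It shows why the single helper covers the pointer and integer cases that the removed createIntOrPtrToIntCast handled, while falling back to a plain bitcast for same-width reinterpretations such as i32 <-> float:

// Sketch only: approximates how IRBuilder::CreateBitOrPointerCast chooses
// a cast opcode (an assumption based on the IRBuilder API, not copied from it).
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

static Value *bitOrPointerCastSketch(Value *V, Type *DestTy, IRBuilder<> &IRB) {
  if (V->getType() == DestTy)
    return V;                              // already the right type, no cast
  if (V->getType()->isPointerTy() && DestTy->isIntegerTy())
    return IRB.CreatePtrToInt(V, DestTy);  // pointer -> integer
  if (V->getType()->isIntegerTy() && DestTy->isPointerTy())
    return IRB.CreateIntToPtr(V, DestTy);  // integer -> pointer
  return IRB.CreateBitCast(V, DestTy);     // same-width reinterpret, e.g. i32 <-> float
}

So for an atomic float load, the runtime call still returns an i32, and the result is reinterpreted with a bitcast rather than the previously emitted (and invalid) inttoptr.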
-rw-r--r--  llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp         | 23
-rw-r--r--  llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll | 51
2 files changed, 57 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 41d0b53672d..355f69c9aae 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -549,11 +549,6 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
return false;
}
-static Value *createIntOrPtrToIntCast(Value *V, Type* Ty, IRBuilder<> &IRB) {
- return isa<PointerType>(V->getType()) ?
- IRB.CreatePtrToInt(V, Ty) : IRB.CreateIntCast(V, Ty, false);
-}
-
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
@@ -576,15 +571,9 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
createOrdering(&IRB, LI->getOrdering())};
Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
- if (Ty == OrigTy) {
- Instruction *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
- ReplaceInstWithInst(I, C);
- } else {
- // We are loading a pointer, so we need to cast the return value.
- Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
- Instruction *Cast = CastInst::Create(Instruction::IntToPtr, C, OrigTy);
- ReplaceInstWithInst(I, Cast);
- }
+ Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
+ Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
+ I->replaceAllUsesWith(Cast);
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr, DL);
@@ -595,7 +584,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
- createIntOrPtrToIntCast(SI->getValueOperand(), Ty, IRB),
+ IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
createOrdering(&IRB, SI->getOrdering())};
CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
ReplaceInstWithInst(I, C);
@@ -626,9 +615,9 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
Value *CmpOperand =
- createIntOrPtrToIntCast(CASI->getCompareOperand(), Ty, IRB);
+ IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
Value *NewOperand =
- createIntOrPtrToIntCast(CASI->getNewValOperand(), Ty, IRB);
+ IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
CmpOperand,
NewOperand,
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll b/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
new file mode 100644
index 00000000000..535d5ae9cd5
--- /dev/null
+++ b/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -tsan -S | FileCheck %s
+; Check that atomic memory operations on floating-point types are converted to calls into ThreadSanitizer runtime.
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+define float @load_float(float* %fptr) {
+ %v = load atomic float, float* %fptr unordered, align 4
+ ret float %v
+ ; CHECK-LABEL: load_float
+ ; CHECK: call i32 @__tsan_atomic32_load(i32* %{{.+}}, i32 0)
+ ; CHECK: bitcast i32 {{.+}} to float
+}
+
+define double @load_double(double* %fptr) {
+ %v = load atomic double, double* %fptr unordered, align 8
+ ret double %v
+ ; CHECK-LABEL: load_double
+ ; CHECK: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 0)
+ ; CHECK: bitcast i64 {{.+}} to double
+}
+
+define fp128 @load_fp128(fp128* %fptr) {
+ %v = load atomic fp128, fp128* %fptr unordered, align 16
+ ret fp128 %v
+ ; CHECK-LABEL: load_fp128
+ ; CHECK: call i128 @__tsan_atomic128_load(i128* %{{.+}}, i32 0)
+ ; CHECK: bitcast i128 {{.+}} to fp128
+}
+
+define void @store_float(float* %fptr, float %v) {
+ store atomic float %v, float* %fptr unordered, align 4
+ ret void
+ ; CHECK-LABEL: store_float
+ ; CHECK: bitcast float %v to i32
+ ; CHECK: call void @__tsan_atomic32_store(i32* %{{.+}}, i32 %{{.+}}, i32 0)
+}
+
+define void @store_double(double* %fptr, double %v) {
+ store atomic double %v, double* %fptr unordered, align 8
+ ret void
+ ; CHECK-LABEL: store_double
+ ; CHECK: bitcast double %v to i64
+ ; CHECK: call void @__tsan_atomic64_store(i64* %{{.+}}, i64 %{{.+}}, i32 0)
+}
+
+define void @store_fp128(fp128* %fptr, fp128 %v) {
+ store atomic fp128 %v, fp128* %fptr unordered, align 16
+ ret void
+ ; CHECK-LABEL: store_fp128
+ ; CHECK: bitcast fp128 %v to i128
+ ; CHECK: call void @__tsan_atomic128_store(i128* %{{.+}}, i128 %{{.+}}, i32 0)
+}