-rw-r--r--  llvm/docs/LangRef.rst                              | 27
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.td                 |  3
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td          |  3
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsARM.td              |  3
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp                        | 10
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp    |  6
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp            |  2
-rw-r--r--  llvm/test/Assembler/autoupgrade-thread-pointer.ll  | 19
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll  |  4
-rw-r--r--  llvm/test/CodeGen/ARM/thread_pointer.ll            |  4
-rw-r--r--  llvm/test/Transforms/SafeStack/AArch64/abi.ll      |  2
-rw-r--r--  llvm/test/Transforms/SafeStack/AArch64/abi_ssp.ll  |  6
12 files changed, 71 insertions(+), 18 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 8b0bedcd534..e8bbcc535f6 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -9634,6 +9634,33 @@ pass will generate the appropriate data structures and replace the
``llvm.instrprof_value_profile`` intrinsic with the call to the profile
runtime library with proper arguments.
+'``llvm.thread.pointer``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ declare i8* @llvm.thread.pointer()
+
+Overview:
+"""""""""
+
+The '``llvm.thread.pointer``' intrinsic returns the value of the thread
+pointer.
+
+Semantics:
+""""""""""
+
+The '``llvm.thread.pointer``' intrinsic returns a pointer to the TLS area
+for the current thread. The exact semantics of this value are
+target-specific: it may point to the start of the TLS area, to the end,
+or somewhere in the middle. Depending on the target, this intrinsic may
+read a register, call a helper function, read from an alternate memory
+space, or perform other operations necessary to locate the TLS area. Not
+all targets support this intrinsic.
+
Standard C Library Intrinsics
-----------------------------
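
As an illustration (not part of this patch): a minimal C++ sketch of how a
pass or front end might emit a call to the new intrinsic through IRBuilder.
The helper name emitThreadPointer is hypothetical; it assumes a Module M and
a builder with its insertion point already set.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Hypothetical helper: emits "%tp = call i8* @llvm.thread.pointer()"
    // at the builder's current insertion point and returns the call.
    static Value *emitThreadPointer(Module &M, IRBuilder<> &B) {
      Function *TP = Intrinsic::getDeclaration(&M, Intrinsic::thread_pointer);
      return B.CreateCall(TP);
    }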
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index f26fefc4177..b67dfc4a1fc 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -306,6 +306,9 @@ def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>,
def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;
+def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__builtin_thread_pointer">;
+
// IntrReadWriteArgMem is more pessimistic than strictly necessary for prefetch,
// however it does conveniently prevent the prefetch from being reordered
// with respect to nearby accesses to the same memory.
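
The GCCBuiltin<"__builtin_thread_pointer"> annotation is what lets Clang map
the GCC-style builtin directly onto this target-independent intrinsic. A
hedged illustration, assuming a target where Clang supports the builtin
(e.g. AArch64 or ARM Linux):

    // Lowers to @llvm.thread.pointer and, from there, to a single
    // thread-pointer read, e.g. "mrs x0, TPIDR_EL0" on AArch64 Linux.
    void *current_thread_pointer() {
      return __builtin_thread_pointer();
    }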
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 5489604565a..3bbc91a39c8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -13,9 +13,6 @@
let TargetPrefix = "aarch64" in {
-def int_aarch64_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
- Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-
def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 626d99be9c4..9fd458102fc 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -17,9 +17,6 @@
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
-def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
- Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-
// A space-consuming intrinsic primarily for testing ARMConstantIslands. The
// first argument is the number of bytes this "instruction" takes up, the second
// and return value are essentially chains, used to force ordering during ISel.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 12e269ab902..41d59b372ca 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -126,6 +126,10 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
StoreLaneInts[fArgs.size() - 5], Tys);
return true;
}
+ if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
+ return true;
+ }
break;
}
@@ -799,6 +803,12 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
return;
}
+
+ case Intrinsic::thread_pointer: {
+ CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
+ CI->eraseFromParent();
+ return;
+ }
}
}
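
For context, a minimal sketch of how a bitcode consumer drives this
remapping, assuming a parsed Module M. UpgradeCallsToIntrinsic is the
existing AutoUpgrade entry point that ends up in the two hunks above.

    #include "llvm/IR/AutoUpgrade.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Rewrite calls to renamed intrinsics (here, the old
    // llvm.{aarch64,arm}.thread.pointer) to their new forms, module-wide.
    static void upgradeIntrinsics(Module &M) {
      for (auto FI = M.begin(), FE = M.end(); FI != FE;) {
        Function *F = &*FI++; // advance first: the upgrade may erase F
        UpgradeCallsToIntrinsic(F);
      }
    }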
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index bfa07cc854c..743a57b36de 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2326,7 +2326,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDLoc dl(Op);
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::aarch64_thread_pointer: {
+ case Intrinsic::thread_pointer: {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
@@ -10265,7 +10265,7 @@ Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
const unsigned TlsOffset = 0x28;
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_thread_pointer);
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
@@ -10281,7 +10281,7 @@ Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) cons
const unsigned TlsOffset = 0x48;
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_thread_pointer);
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9d210375c12..f240dca1b07 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2929,7 +2929,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
"RBIT intrinsic must have i32 type!");
return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1));
}
- case Intrinsic::arm_thread_pointer: {
+ case Intrinsic::thread_pointer: {
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
}
diff --git a/llvm/test/Assembler/autoupgrade-thread-pointer.ll b/llvm/test/Assembler/autoupgrade-thread-pointer.ll
new file mode 100644
index 00000000000..a96829d9b2e
--- /dev/null
+++ b/llvm/test/Assembler/autoupgrade-thread-pointer.ll
@@ -0,0 +1,19 @@
+; Test autoupgrade of arch-specific thread pointer intrinsics
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+declare i8* @llvm.aarch64.thread.pointer()
+declare i8* @llvm.arm.thread.pointer()
+
+define i8* @test1() {
+; CHECK: test1()
+; CHECK: call i8* @llvm.thread.pointer()
+ %1 = call i8* @llvm.aarch64.thread.pointer()
+ ret i8* %1
+}
+
+define i8* @test2() {
+; CHECK: test2()
+; CHECK: call i8* @llvm.thread.pointer()
+ %1 = call i8* @llvm.arm.thread.pointer()
+ ret i8* %1
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
index 34fa1b47156..6caf3a2a18e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
@@ -1,11 +1,11 @@
; RUN: llc < %s -march=aarch64 -mtriple=aarch64-linux-gnu | FileCheck %s
; Function Attrs: nounwind readnone
-declare i8* @llvm.aarch64.thread.pointer() #1
+declare i8* @llvm.thread.pointer() #1
define i8* @thread_pointer() {
; CHECK: thread_pointer:
; CHECK: mrs {{x[0-9]+}}, TPIDR_EL0
- %1 = tail call i8* @llvm.aarch64.thread.pointer()
+ %1 = tail call i8* @llvm.thread.pointer()
ret i8* %1
}
diff --git a/llvm/test/CodeGen/ARM/thread_pointer.ll b/llvm/test/CodeGen/ARM/thread_pointer.ll
index c403fa5c4a2..fe1d3a4dfd0 100644
--- a/llvm/test/CodeGen/ARM/thread_pointer.ll
+++ b/llvm/test/CodeGen/ARM/thread_pointer.ll
@@ -3,8 +3,8 @@
define i8* @test() {
entry:
- %tmp1 = call i8* @llvm.arm.thread.pointer( ) ; <i8*> [#uses=0]
+ %tmp1 = call i8* @llvm.thread.pointer() ; <i8*> [#uses=0]
ret i8* %tmp1
}
-declare i8* @llvm.arm.thread.pointer()
+declare i8* @llvm.thread.pointer()
diff --git a/llvm/test/Transforms/SafeStack/AArch64/abi.ll b/llvm/test/Transforms/SafeStack/AArch64/abi.ll
index cdec923eb74..bd6710d160c 100644
--- a/llvm/test/Transforms/SafeStack/AArch64/abi.ll
+++ b/llvm/test/Transforms/SafeStack/AArch64/abi.ll
@@ -3,7 +3,7 @@
define void @foo() nounwind uwtable safestack {
entry:
-; CHECK: %[[TP:.*]] = call i8* @llvm.aarch64.thread.pointer()
+; CHECK: %[[TP:.*]] = call i8* @llvm.thread.pointer()
; CHECK: %[[SPA0:.*]] = getelementptr i8, i8* %[[TP]], i32 72
; CHECK: %[[SPA:.*]] = bitcast i8* %[[SPA0]] to i8**
; CHECK: %[[USP:.*]] = load i8*, i8** %[[SPA]]
diff --git a/llvm/test/Transforms/SafeStack/AArch64/abi_ssp.ll b/llvm/test/Transforms/SafeStack/AArch64/abi_ssp.ll
index da0f5e0a80a..5d584d0a76b 100644
--- a/llvm/test/Transforms/SafeStack/AArch64/abi_ssp.ll
+++ b/llvm/test/Transforms/SafeStack/AArch64/abi_ssp.ll
@@ -3,10 +3,10 @@
define void @foo() nounwind uwtable safestack sspreq {
entry:
-; The first @llvm.aarch64.thread.pointer is for the unsafe stack pointer, skip it.
-; TLS: call i8* @llvm.aarch64.thread.pointer()
+; The first @llvm.thread.pointer is for the unsafe stack pointer, skip it.
+; TLS: call i8* @llvm.thread.pointer()
-; TLS: %[[TP2:.*]] = call i8* @llvm.aarch64.thread.pointer()
+; TLS: %[[TP2:.*]] = call i8* @llvm.thread.pointer()
; TLS: %[[B:.*]] = getelementptr i8, i8* %[[TP2]], i32 40
; TLS: %[[C:.*]] = bitcast i8* %[[B]] to i8**
; TLS: %[[StackGuard:.*]] = load i8*, i8** %[[C]]