author    Rafael Espindola <rafael.espindola@gmail.com>   2018-02-19 16:02:38 +0000
committer Rafael Espindola <rafael.espindola@gmail.com>   2018-02-19 16:02:38 +0000
commit    c7e51805ff52e84594b0514d5bdf31579434b80c (patch)
tree      8b16479bcae90b6f88b97cbdc450ee5a78b8bbdd /llvm
parent    9c5ac63785b5aac0c85b57637409869b27983bfe (diff)
download  bcm5719-llvm-c7e51805ff52e84594b0514d5bdf31579434b80c.tar.gz
          bcm5719-llvm-c7e51805ff52e84594b0514d5bdf31579434b80c.zip
Bring back r323297.
It was reverted because it broke the grub build. The grub build broke because grub does its own relocation processing and was not handling R_386_PLT32. Since grub has no dynamic linker, the fix on their side is trivial: handle R_386_PLT32 exactly like R_386_PC32.

The report also noted that they are using -fno-integrated-assembler. Upstream GAS (starting with commit 451875b4f976a527395e9303224c7881b65e12ed) already produces R_386_PLT32 anyway, so they have to update their code one way or the other.

Original message:

Don't assume a null GV is local for ELF and MachO.

This is already a simplification, and it should help with avoiding a PLT reference when calling an intrinsic with -fno-plt.

With this change we return false for null GVs, so the caller only needs to check the new metadata to decide whether it should use foo@plt or *foo@got.

llvm-svn: 325514
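As an aside, the grub-side fix described above (treat R_386_PLT32 exactly like R_386_PC32 when no dynamic linker is present) can be sketched as follows. This is a minimal illustration, not grub's actual code; the helper name is made up, and only the ELF i386 relocation numbers are real:

    #include <cstdint>

    // Standard ELF i386 relocation type numbers.
    constexpr uint32_t R_386_PC32  = 2;
    constexpr uint32_t R_386_PLT32 = 4;

    // Apply one REL-format relocation: `place` is the word being patched (it
    // already holds the addend) and `sym` is the resolved symbol address.
    // Without a dynamic linker there is no PLT, so a PLT32 branch can be
    // resolved straight to the symbol, i.e. exactly like PC32 (S + A - P).
    bool apply_i386_reloc(uint32_t type, uint32_t *place, uint32_t sym) {
      switch (type) {
      case R_386_PC32:
      case R_386_PLT32:
        *place += sym - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(place));
        return true;
      default:
        return false; // relocation type not handled by this sketch
      }
    }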
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/TargetMachine.cpp                |  21
-rw-r--r--  llvm/test/CodeGen/X86/finite-libcalls.ll         |  24
-rw-r--r--  llvm/test/CodeGen/X86/fp-intrinsics.ll           |   4
-rw-r--r--  llvm/test/CodeGen/X86/half.ll                    |   2
-rw-r--r--  llvm/test/CodeGen/X86/memset-nonzero.ll          |   2
-rw-r--r--  llvm/test/CodeGen/X86/negative-sin.ll            |   6
-rw-r--r--  llvm/test/CodeGen/X86/vector-half-conversions.ll |   2
7 files changed, 34 insertions, 27 deletions
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
index ee5b010ecf2..76ec541f8b6 100644
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -137,20 +137,27 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module &M,
if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
return true;
+ // If GV is null we know that this is a call to an intrinsic. For ELF and
+ // MachO we don't need to assume those are local since the linker can trivially
+ // convert a call to a PLT to a direct call if the target (in the runtime
+ // library) turns out to be local.
+ if (!GV)
+ return false;
+
// Most PIC code sequences that assume that a symbol is local cannot
// produce a 0 if it turns out the symbol is undefined. While this
// is ABI and relocation dependent, it seems worth it to handle it
// here.
- if (GV && isPositionIndependent() && GV->hasExternalWeakLinkage())
+ if (isPositionIndependent() && GV->hasExternalWeakLinkage())
return false;
- if (GV && !GV->hasDefaultVisibility())
+ if (!GV->hasDefaultVisibility())
return true;
if (TT.isOSBinFormatMachO()) {
if (RM == Reloc::Static)
return true;
- return GV && GV->isStrongDefinitionForLinker();
+ return GV->isStrongDefinitionForLinker();
}
assert(TT.isOSBinFormatELF());
@@ -160,19 +167,19 @@ bool TargetMachine::shouldAssumeDSOLocal(const Module &M,
RM == Reloc::Static || M.getPIELevel() != PIELevel::Default;
if (IsExecutable) {
// If the symbol is defined, it cannot be preempted.
- if (GV && !GV->isDeclarationForLinker())
+ if (!GV->isDeclarationForLinker())
return true;
// A symbol marked nonlazybind should not be accessed with a plt. If the
// symbol turns out to be external, the linker will convert a direct
// access to an access via the plt, so don't assume it is local.
- const Function *F = dyn_cast_or_null<Function>(GV);
+ const Function *F = dyn_cast<Function>(GV);
if (F && F->hasFnAttribute(Attribute::NonLazyBind))
return false;
- bool IsTLS = GV && GV->isThreadLocal();
+ bool IsTLS = GV->isThreadLocal();
bool IsAccessViaCopyRelocs =
- Options.MCOptions.MCPIECopyRelocations && GV && isa<GlobalVariable>(GV);
+ Options.MCOptions.MCPIECopyRelocations && isa<GlobalVariable>(GV);
Triple::ArchType Arch = TT.getArch();
bool IsPPC =
Arch == Triple::ppc || Arch == Triple::ppc64 || Arch == Triple::ppc64le;
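The net effect of the TargetMachine.cpp change above is that a null GV (an intrinsic lowered to a runtime-library call) is no longer assumed dso_local, and the caller picks the call form from the new -fno-plt metadata. A hedged sketch of that caller-side decision, with illustrative names only (pickRuntimeCallForm and its parameters are not LLVM API):

    enum class CallForm { Direct, ViaPLT, ViaGOT };

    // Illustrative decision helper for a runtime-library call whose GlobalValue
    // is null. `IsDSOLocal` stands for the result of shouldAssumeDSOLocal()
    // (now false for a null GV); `NoPLTRequested` stands for the new module
    // metadata emitted under -fno-plt.
    CallForm pickRuntimeCallForm(bool IsDSOLocal, bool NoPLTRequested) {
      if (IsDSOLocal)
        return CallForm::Direct;  // call foo
      if (NoPLTRequested)
        return CallForm::ViaGOT;  // call *foo@GOTPCREL(%rip)
      return CallForm::ViaPLT;    // call foo@plt; the linker can still relax
                                  // this to a direct call if foo ends up local
    }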
diff --git a/llvm/test/CodeGen/X86/finite-libcalls.ll b/llvm/test/CodeGen/X86/finite-libcalls.ll
index d4b79acbd7e..f3d336083b2 100644
--- a/llvm/test/CodeGen/X86/finite-libcalls.ll
+++ b/llvm/test/CodeGen/X86/finite-libcalls.ll
@@ -9,7 +9,7 @@
define float @exp_f32(float %x) #0 {
; GNU-LABEL: exp_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __expf_finite # TAILCALL
+; GNU-NEXT: jmp __expf_finite@PLT # TAILCALL
;
; WIN-LABEL: exp_f32:
; WIN: # %bb.0:
@@ -25,7 +25,7 @@ define float @exp_f32(float %x) #0 {
define double @exp_f64(double %x) #0 {
; GNU-LABEL: exp_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __exp_finite # TAILCALL
+; GNU-NEXT: jmp __exp_finite@PLT # TAILCALL
;
; WIN-LABEL: exp_f64:
; WIN: # %bb.0:
@@ -72,7 +72,7 @@ define x86_fp80 @exp_f80(x86_fp80 %x) #0 {
define float @exp2_f32(float %x) #0 {
; GNU-LABEL: exp2_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __exp2f_finite # TAILCALL
+; GNU-NEXT: jmp __exp2f_finite@PLT # TAILCALL
;
; WIN-LABEL: exp2_f32:
; WIN: # %bb.0:
@@ -88,7 +88,7 @@ define float @exp2_f32(float %x) #0 {
define double @exp2_f64(double %x) #0 {
; GNU-LABEL: exp2_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __exp2_finite # TAILCALL
+; GNU-NEXT: jmp __exp2_finite@PLT # TAILCALL
;
; WIN-LABEL: exp2_f64:
; WIN: # %bb.0:
@@ -135,7 +135,7 @@ define x86_fp80 @exp2_f80(x86_fp80 %x) #0 {
define float @log_f32(float %x) #0 {
; GNU-LABEL: log_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __logf_finite # TAILCALL
+; GNU-NEXT: jmp __logf_finite@PLT # TAILCALL
;
; WIN-LABEL: log_f32:
; WIN: # %bb.0:
@@ -151,7 +151,7 @@ define float @log_f32(float %x) #0 {
define double @log_f64(double %x) #0 {
; GNU-LABEL: log_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __log_finite # TAILCALL
+; GNU-NEXT: jmp __log_finite@PLT # TAILCALL
;
; WIN-LABEL: log_f64:
; WIN: # %bb.0:
@@ -198,7 +198,7 @@ define x86_fp80 @log_f80(x86_fp80 %x) #0 {
define float @log2_f32(float %x) #0 {
; GNU-LABEL: log2_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __log2f_finite # TAILCALL
+; GNU-NEXT: jmp __log2f_finite@PLT # TAILCALL
;
; WIN-LABEL: log2_f32:
; WIN: # %bb.0:
@@ -214,7 +214,7 @@ define float @log2_f32(float %x) #0 {
define double @log2_f64(double %x) #0 {
; GNU-LABEL: log2_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __log2_finite # TAILCALL
+; GNU-NEXT: jmp __log2_finite@PLT # TAILCALL
;
; WIN-LABEL: log2_f64:
; WIN: # %bb.0:
@@ -261,7 +261,7 @@ define x86_fp80 @log2_f80(x86_fp80 %x) #0 {
define float @log10_f32(float %x) #0 {
; GNU-LABEL: log10_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __log10f_finite # TAILCALL
+; GNU-NEXT: jmp __log10f_finite@PLT # TAILCALL
;
; WIN-LABEL: log10_f32:
; WIN: # %bb.0:
@@ -277,7 +277,7 @@ define float @log10_f32(float %x) #0 {
define double @log10_f64(double %x) #0 {
; GNU-LABEL: log10_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp __log10_finite # TAILCALL
+; GNU-NEXT: jmp __log10_finite@PLT # TAILCALL
;
; WIN-LABEL: log10_f64:
; WIN: # %bb.0:
@@ -325,7 +325,7 @@ define float @pow_f32(float %x) #0 {
; GNU-LABEL: pow_f32:
; GNU: # %bb.0:
; GNU-NEXT: movaps %xmm0, %xmm1
-; GNU-NEXT: jmp __powf_finite # TAILCALL
+; GNU-NEXT: jmp __powf_finite@PLT # TAILCALL
;
; WIN-LABEL: pow_f32:
; WIN: # %bb.0:
@@ -344,7 +344,7 @@ define double @pow_f64(double %x) #0 {
; GNU-LABEL: pow_f64:
; GNU: # %bb.0:
; GNU-NEXT: movaps %xmm0, %xmm1
-; GNU-NEXT: jmp __pow_finite # TAILCALL
+; GNU-NEXT: jmp __pow_finite@PLT # TAILCALL
;
; WIN-LABEL: pow_f64:
; WIN: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index eae3955adc3..9816875ebad 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -245,7 +245,7 @@ entry:
; Verify that fma(3.5) isn't simplified when the rounding mode is
; unknown.
; CHECK-LABEL: f17
-; FMACALL32: jmp fmaf # TAILCALL
+; FMACALL32: jmp fmaf@PLT # TAILCALL
; FMA32: vfmadd213ss
define float @f17() {
entry:
@@ -261,7 +261,7 @@ entry:
; Verify that fma(42.1) isn't simplified when the rounding mode is
; unknown.
; CHECK-LABEL: f18
-; FMACALL64: jmp fma # TAILCALL
+; FMACALL64: jmp fma@PLT # TAILCALL
; FMA64: vfmadd213sd
define double @f18() {
entry:
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 896b358c0d9..3bc176f3d8f 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -75,7 +75,7 @@ define float @test_extend32(half* %addr) #0 {
; CHECK-LIBCALL-LABEL: test_extend32:
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; BWON-F16C-LABEL: test_extend32:
; BWON-F16C: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index 37b98b40192..e7192b0e136 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -394,7 +394,7 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_256_nonconst_bytes:
; SSE: # %bb.0:
; SSE-NEXT: movl $256, %edx # imm = 0x100
-; SSE-NEXT: jmp memset # TAILCALL
+; SSE-NEXT: jmp memset@PLT # TAILCALL
;
; SSE2FAST-LABEL: memset_256_nonconst_bytes:
; SSE2FAST: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll
index c30cd2741e6..9f00dbb01ff 100644
--- a/llvm/test/CodeGen/X86/negative-sin.ll
+++ b/llvm/test/CodeGen/X86/negative-sin.ll
@@ -28,7 +28,7 @@ define double @strict(double %e) nounwind {
define double @fast(double %e) nounwind {
; CHECK-LABEL: fast:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub fast double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub fast double 0.0, %g
@@ -40,7 +40,7 @@ define double @fast(double %e) nounwind {
define double @nsz(double %e) nounwind {
; CHECK-LABEL: nsz:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub nsz double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub nsz double 0.0, %g
@@ -88,7 +88,7 @@ define double @semi_strict2(double %e) nounwind {
define double @fn_attr(double %e) nounwind #0 {
; CHECK-LABEL: fn_attr:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub double 0.0, %g
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index 94dc5b9e001..51a4dae355e 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -2953,7 +2953,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
define i16 @cvt_f64_to_i16(double %a0) nounwind {
; ALL-LABEL: cvt_f64_to_i16:
; ALL: # %bb.0:
-; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
+; ALL-NEXT: jmp __truncdfhf2@PLT # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2