author    Asiri Rathnayake <asiri.rathnayake@arm.com>  2014-09-10 13:54:38 +0000
committer Asiri Rathnayake <asiri.rathnayake@arm.com>  2014-09-10 13:54:38 +0000
commit    369c03063376df9d24d521b0edd4ee0e72360e4d (patch)
tree      935ecf715f8d16b037c8a199a890a4d3d51021e2 /llvm
parent    3967764b98cbbea1c4a72f8767fc988955782085 (diff)
[AArch64] Use a constant pool load for weak symbol references when
using the static relocation model and small code model.

Summary: Currently we generate GOT-based relocations for weak symbol
references regardless of the underlying relocation model. This should be
changed so that with the static relocation model we use a constant pool
load instead.

Patch from: Keith Walker

Reviewers: Renato Golin, Tim Northover

llvm-svn: 217503
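
As a rough illustration (the input file and register numbers here are hypothetical, modelled on the updated extern-weak tests below), taking the address of a weak external symbol under the small code model changes as follows:

; llc -mtriple=aarch64-none-linux-gnu -relocation-model=static -o - weak-ref.ll
@var = extern_weak global i32

define i32* @addr_of_var() {
  ret i32* @var
; Before this patch (GOT access regardless of relocation model):
;   adrp x8, :got:var
;   ldr  x0, [x8, :got_lo12:var]
; After this patch (load from a constant pool entry that holds the address
; of var, which the static linker can legitimately resolve to 0):
;   adrp x8, .LCPI0_0
;   ldr  x0, [x8, :lo12:.LCPI0_0]
; where .LCPI0_0 is emitted as ".xword var".
}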
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp     |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 22
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.cpp    | 13
-rw-r--r--  llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h |  7
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-extern-weak.ll  | 19
-rw-r--r--  llvm/test/CodeGen/AArch64/extern-weak.ll        | 19
6 files changed, 73 insertions, 10 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index ca8f9a5bfee..aa2860c6557 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -372,6 +372,9 @@ unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) {
.addReg(ADRPReg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF |
AArch64II::MO_NC);
+ } else if (OpFlags & AArch64II::MO_CONSTPOOL) {
+ // We can't handle addresses loaded from a constant pool quickly yet.
+ return 0;
} else {
// ADRP + ADDX
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index db00f8dc84a..91629d30444 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2711,7 +2711,8 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
SDLoc DL(Op);
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
+ const GlobalValue *GV = GN->getGlobal();
unsigned char OpFlags =
Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
@@ -2726,6 +2727,25 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
}
+ if ((OpFlags & AArch64II::MO_CONSTPOOL) != 0) {
+ assert(getTargetMachine().getCodeModel() == CodeModel::Small &&
+ "use of MO_CONSTPOOL only supported on small model");
+ SDValue Hi = DAG.getTargetConstantPool(GV, PtrVT, 0, 0, AArch64II::MO_PAGE);
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
+ unsigned char LoFlags = AArch64II::MO_PAGEOFF | AArch64II::MO_NC;
+ SDValue Lo = DAG.getTargetConstantPool(GV, PtrVT, 0, 0, LoFlags);
+ SDValue PoolAddr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ SDValue GlobalAddr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), PoolAddr,
+ MachinePointerInfo::getConstantPool(),
+ /*isVolatile=*/ false,
+ /*isNonTemporal=*/ true,
+ /*isInvariant=*/ true, 8);
+ if (GN->getOffset() != 0)
+ return DAG.getNode(ISD::ADD, DL, PtrVT, GlobalAddr,
+ DAG.getConstant(GN->getOffset(), PtrVT));
+ return GlobalAddr;
+ }
+
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
const unsigned char MO_NC = AArch64II::MO_NC;
return DAG.getNode(
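
Where the global reference carries a constant addend, the GN->getOffset() branch above folds it back in with an explicit add after the pool load. A hypothetical input that exercises this (mirroring the bar() functions in the updated tests) might look like:

@arr = extern_weak global [10 x i32]

define i32* @elem5() {
  %addr = getelementptr [10 x i32]* @arr, i32 0, i32 5
  ret i32* %addr
; Expected static-model codegen, roughly: load the address of @arr from its
; constant pool entry, then add the byte offset of element 5 (5 * 4 = 20):
;   ldr  x8, [x9, :lo12:.LCPI0_0]
;   add  x0, x8, #20
}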
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index bb0b72c585b..641a67fe629 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -78,10 +78,15 @@ AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
return AArch64II::MO_GOT;
// The small code mode's direct accesses use ADRP, which cannot necessarily
- // produce the value 0 (if the code is above 4GB). Therefore they must use the
- // GOT.
- if (TM.getCodeModel() == CodeModel::Small && GV->isWeakForLinker() && isDecl)
- return AArch64II::MO_GOT;
+ // produce the value 0 (if the code is above 4GB).
+ if (TM.getCodeModel() == CodeModel::Small &&
+ GV->isWeakForLinker() && isDecl) {
+ // In PIC mode use the GOT, but in absolute mode use a constant pool load.
+ if (TM.getRelocationModel() == Reloc::Static)
+ return AArch64II::MO_CONSTPOOL;
+ else
+ return AArch64II::MO_GOT;
+ }
// If symbol visibility is hidden, the extra load is not needed if
// the symbol is definitely defined in the current translation unit.
diff --git a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index 3d030a5042a..326eb9995d7 100644
--- a/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/llvm/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -1271,7 +1271,12 @@ namespace AArch64II {
/// thread-local symbol. On Darwin, only one type of thread-local access
/// exists (pre linker-relaxation), but on ELF the TLSModel used for the
/// referee will affect interpretation.
- MO_TLS = 0x20
+ MO_TLS = 0x20,
+
+ /// MO_CONSTPOOL - This flag indicates that a symbol operand represents
+ /// the address of a constant pool entry for the symbol, rather than the
+ /// address of the symbol itself.
+ MO_CONSTPOOL = 0x40
};
} // end namespace AArch64II
diff --git a/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll b/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
index a239403befa..06bd9270ba4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extern-weak.ll
@@ -1,16 +1,23 @@
-; RUN: llc -mtriple=arm64-none-linux-gnu -o - < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=pic -o - < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck --check-prefix=CHECK-STATIC %s
; RUN: llc -mtriple=arm64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
define i32()* @foo() {
; The usual ADRP/ADD pair can't be used for a weak reference because it must
-; evaluate to 0 if the symbol is undefined. We use a litpool entry.
+; evaluate to 0 if the symbol is undefined. We use a GOT entry for PIC
+; otherwise a litpool entry.
ret i32()* @var
; CHECK: adrp x[[VAR:[0-9]+]], :got:var
; CHECK: ldr x0, [x[[VAR]], :got_lo12:var]
+; CHECK-STATIC: .LCPI0_0:
+; CHECK-STATIC-NEXT: .xword var
+; CHECK-STATIC: adrp x[[VAR:[0-9]+]], .LCPI0_0
+; CHECK-STATIC: ldr x0, [x[[VAR]], :lo12:.LCPI0_0]
+
; In the large model, the usual relocations are absolute and can
; materialise 0.
; CHECK-LARGE: movz x0, #:abs_g3:var
@@ -29,6 +36,11 @@ define i32* @bar() {
; CHECK: add x0, [[ARR_VAR]], #20
ret i32* %addr
+; CHECK-STATIC: .LCPI1_0:
+; CHECK-STATIC-NEXT: .xword arr_var
+; CHECK-STATIC: ldr [[BASE:x[0-9]+]], [{{x[0-9]+}}, :lo12:.LCPI1_0]
+; CHECK-STATIC: add x0, [[BASE]], #20
+
; In the large model, the usual relocations are absolute and can
; materialise 0.
; CHECK-LARGE: movz [[ARR_VAR:x[0-9]+]], #:abs_g3:arr_var
@@ -44,6 +56,9 @@ define i32* @wibble() {
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
+; CHECK-STATIC: adrp [[BASE:x[0-9]+]], defined_weak_var
+; CHECK-STATIC: add x0, [[BASE]], :lo12:defined_weak_var
+
; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
diff --git a/llvm/test/CodeGen/AArch64/extern-weak.ll b/llvm/test/CodeGen/AArch64/extern-weak.ll
index ce5c0f68661..f647c4bcda5 100644
--- a/llvm/test/CodeGen/AArch64/extern-weak.ll
+++ b/llvm/test/CodeGen/AArch64/extern-weak.ll
@@ -1,17 +1,24 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -o - %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=static -o - < %s | FileCheck --check-prefix=CHECK-STATIC %s
; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
define i32()* @foo() {
; The usual ADRP/ADD pair can't be used for a weak reference because it must
-; evaluate to 0 if the symbol is undefined. We use a litpool entry.
+; evaluate to 0 if the symbol is undefined. We use a GOT entry for PIC
+; otherwise a litpool entry.
ret i32()* @var
; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:var
; CHECK: ldr x0, [x[[ADDRHI]], :got_lo12:var]
+; CHECK-STATIC: .LCPI0_0:
+; CHECK-STATIC-NEXT: .xword var
+; CHECK-STATIC: adrp x[[VAR:[0-9]+]], .LCPI0_0
+; CHECK-STATIC: ldr x0, [x[[VAR]], :lo12:.LCPI0_0]
+
; In the large model, the usual relocations are absolute and can
; materialise 0.
; CHECK-LARGE: movz x0, #:abs_g3:var
@@ -31,6 +38,11 @@ define i32* @bar() {
; CHECK: ldr [[BASE:x[0-9]+]], [x[[ADDRHI]], :got_lo12:arr_var]
; CHECK: add x0, [[BASE]], #20
+; CHECK-STATIC: .LCPI1_0:
+; CHECK-STATIC-NEXT: .xword arr_var
+; CHECK-STATIC: ldr [[BASE:x[0-9]+]], [{{x[0-9]+}}, :lo12:.LCPI1_0]
+; CHECK-STATIC: add x0, [[BASE]], #20
+
ret i32* %addr
; In the large model, the usual relocations are absolute and can
@@ -49,6 +61,9 @@ define i32* @wibble() {
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
; CHECK: add x0, [[BASE]], :lo12:defined_weak_var
+; CHECK-STATIC: adrp [[BASE:x[0-9]+]], defined_weak_var
+; CHECK-STATIC: add x0, [[BASE]], :lo12:defined_weak_var
+
; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var