author     David Green <david.green@arm.com>  2018-08-22 11:31:39 +0000
committer  David Green <david.green@arm.com>  2018-08-22 11:31:39 +0000
commit     9dd1d451d9719aa91b3bdd59c0c667983e1baf05 (patch)
tree       f3bc770a3ce8a449b97ddd3b29f37efea905a543 /llvm/lib/Target/AArch64
parent     bb8e64e7f5ad448bf04ba84c995b8a7cbf9bb7e4 (diff)
[AArch64] Add Tiny Code Model for AArch64
This adds the plumbing for the Tiny code model for the AArch64 backend. Instead of loading addresses through the usual ADRP;ADD pair used in the Small model, it uses a single ADR. The 21-bit range of an ADR means that the code and its statically defined symbols need to be within 1MB of each other.

This makes it mostly interesting for embedded applications, where we want to fit as much as we can into as small a space as possible.

Differential Revision: https://reviews.llvm.org/D49673

llvm-svn: 340397
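For illustration, a minimal sketch of what the two code models emit when materializing the address of a global; this is not part of the commit, and the symbol name `var` is hypothetical:

    // Small code model: ADRP + ADD pair, roughly +/-4GB of reach.
    adrp x0, var              // x0 = address of the 4KB page containing var
    add  x0, x0, :lo12:var    // add the low 12 bits of var's address

    // Tiny code model: a single ADR, +/-1MB of reach.
    adr  x0, var              // x0 = pc-relative address of var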
Diffstat (limited to 'llvm/lib/Target/AArch64')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp             | 69
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp                  |  1
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp                   | 32
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h                     |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp                      |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.td                       |  8
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp            |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.cpp                      |  4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetMachine.cpp                  | 14
-rw-r--r--  llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp            | 33
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp        |  9
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp   |  4
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp            |  2
13 files changed, 138 insertions, 47 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 9226a9dd879..f7190d58fbf 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -835,36 +835,55 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
}
case AArch64::LOADgot: {
- // Expand into ADRP + LDR.
+ MachineFunction *MF = MBB.getParent();
unsigned DstReg = MI.getOperand(0).getReg();
const MachineOperand &MO1 = MI.getOperand(1);
unsigned Flags = MO1.getTargetFlags();
- MachineInstrBuilder MIB1 =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
- MachineInstrBuilder MIB2 =
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
- .add(MI.getOperand(0))
- .addReg(DstReg);
-
- if (MO1.isGlobal()) {
- MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
- MIB2.addGlobalAddress(MO1.getGlobal(), 0,
- Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- } else if (MO1.isSymbol()) {
- MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
- MIB2.addExternalSymbol(MO1.getSymbolName(),
- Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+
+ if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
+ // Tiny code model: expand into a single pc-relative LDR (LDRXl).
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
+ TII->get(AArch64::LDRXl), DstReg);
+
+ if (MO1.isGlobal()) {
+ MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
+ } else if (MO1.isSymbol()) {
+ MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
+ } else {
+ assert(MO1.isCPI() &&
+ "Only expect globals, externalsymbols, or constant pools");
+ MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
+ }
} else {
- assert(MO1.isCPI() &&
- "Only expect globals, externalsymbols, or constant pools");
- MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
- Flags | AArch64II::MO_PAGE);
- MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
- Flags | AArch64II::MO_PAGEOFF |
- AArch64II::MO_NC);
+ // Small code model: expand into an ADRP + LDR pair.
+ MachineInstrBuilder MIB1 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
+ MachineInstrBuilder MIB2 =
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
+ .add(MI.getOperand(0))
+ .addReg(DstReg);
+
+ if (MO1.isGlobal()) {
+ MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
+ MIB2.addGlobalAddress(MO1.getGlobal(), 0,
+ Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ } else if (MO1.isSymbol()) {
+ MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
+ MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
+ AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ } else {
+ assert(MO1.isCPI() &&
+ "Only expect globals, externalsymbols, or constant pools");
+ MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGE);
+ MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
+ Flags | AArch64II::MO_PAGEOFF |
+ AArch64II::MO_NC);
+ }
+
+ transferImpOps(MI, MIB1, MIB2);
}
-
- transferImpOps(MI, MIB1, MIB2);
MI.eraseFromParent();
return true;
}
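For reference, a hedged sketch of the assembly produced by the LOADgot expansion above (the symbol name `sym` is hypothetical): the small model addresses the GOT slot page-wise, while the tiny model reaches it with a single pc-relative literal load.

    // Small code model GOT access: ADRP + LDR pair.
    adrp x0, :got:sym
    ldr  x0, [x0, :got_lo12:sym]

    // Tiny code model GOT access: one pc-relative LDR (LDRXl), +/-1MB range.
    ldr  x0, :got:sym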
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index deb70e89c67..54914c0a1b3 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -686,6 +686,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
.setMIFlags(MachineInstr::FrameSetup);
switch (MF.getTarget().getCodeModel()) {
+ case CodeModel::Tiny:
case CodeModel::Small:
case CodeModel::Medium:
case CodeModel::Kernel:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b665debb929..f998d0fd8f3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1086,6 +1086,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::FIRST_NUMBER: break;
case AArch64ISD::CALL: return "AArch64ISD::CALL";
case AArch64ISD::ADRP: return "AArch64ISD::ADRP";
+ case AArch64ISD::ADR: return "AArch64ISD::ADR";
case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow";
case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot";
case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG";
@@ -3912,6 +3913,17 @@ SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
}
+// (adr sym)
+template <class NodeTy>
+SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
+ unsigned Flags) const {
+ LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrTiny\n");
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ SDValue Sym = getTargetNode(N, Ty, DAG, Flags);
+ return DAG.getNode(AArch64ISD::ADR, DL, Ty, Sym);
+}
+
SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
@@ -3926,7 +3938,8 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
"unexpected offset in global node");
- // This also catches the large code model case for Darwin.
+ // This also catches the large code model case for Darwin, and the tiny
+ // code model with GOT relocations.
if ((OpFlags & AArch64II::MO_GOT) != 0) {
return getGOT(GN, DAG, TargetFlags);
}
@@ -3934,6 +3947,8 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SDValue Result;
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
Result = getAddrLarge(GN, DAG, TargetFlags);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ Result = getAddrTiny(GN, DAG, TargetFlags);
} else {
Result = getAddr(GN, DAG, TargetFlags);
}
@@ -4055,13 +4070,15 @@ SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
assert(Subtarget->isTargetELF() && "This function expects an ELF target");
- assert(Subtarget->useSmallAddressing() &&
- "ELF TLS only supported in small memory model");
+ if (getTargetMachine().getCodeModel() == CodeModel::Large)
+ report_fatal_error("ELF TLS only supported in small memory model");
// Different choices can be made for the maximum size of the TLS area for a
// module. For the small address model, the default TLS size is 16MiB and the
// maximum TLS size is 4GiB.
// FIXME: add -mtls-size command line option and make it control the 16MiB
// vs. 4GiB code sequence generation.
+ // FIXME: add tiny code model support. We currently generate the same code
+ // as small, which may be larger than needed.
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
@@ -4779,6 +4796,8 @@ SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
return getAddrLarge(JT, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(JT, DAG);
}
return getAddr(JT, DAG);
}
@@ -4793,6 +4812,8 @@ SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
return getGOT(CP, DAG);
}
return getAddrLarge(CP, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(CP, DAG);
} else {
return getAddr(CP, DAG);
}
@@ -4804,9 +4825,10 @@ SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
return getAddrLarge(BA, DAG);
- } else {
- return getAddr(BA, DAG);
+ } else if (getTargetMachine().getCodeModel() == CodeModel::Tiny) {
+ return getAddrTiny(BA, DAG);
}
+ return getAddr(BA, DAG);
}
SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a6d66aeae04..4470658b2c0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -35,6 +35,7 @@ enum NodeType : unsigned {
// offset of a variable into X0, using the TLSDesc model.
TLSDESC_CALLSEQ,
ADRP, // Page address of a TargetGlobalAddress operand.
+ ADR, // ADR: pc-relative address of a label or symbol operand.
ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
LOADgot, // Load from automatically generated descriptor (e.g. Global
// Offset Table, TLS record).
@@ -587,6 +588,8 @@ private:
SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
template <class NodeTy>
SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
+ template <class NodeTy>
+ SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 032d53d1962..e17f7c99e47 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1632,6 +1632,9 @@ bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
.addReg(Reg, RegState::Kill)
.addImm(0)
.addMemOperand(*MI.memoperands_begin());
+ } else if (TM.getCodeModel() == CodeModel::Tiny) {
+ BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg)
+ .addGlobalAddress(GV, 0, OpFlags);
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index d89ff41894e..f1ec76bdf7c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -176,6 +176,7 @@ def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
// Node definitions.
def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
+def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
@@ -1385,7 +1386,8 @@ def : InstAlias<"cneg $dst, $src, $cc",
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
-def ADR : ADRI<0, "adr", adrlabel, []>;
+def ADR : ADRI<0, "adr", adrlabel,
+ [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
} // hasSideEffects = 0
def ADRP : ADRI<1, "adrp", adrplabel,
@@ -1393,6 +1395,10 @@ def ADRP : ADRI<1, "adrp", adrplabel,
} // isReMaterializable = 1
// page address of a constant pool entry, block address
+def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
+def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
+def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
+def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index b2b500320b5..46b8b4dc648 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -983,6 +983,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
materializeLargeCMVal(I, GV, OpFlags);
I.eraseFromParent();
return true;
+ } else if (TM.getCodeModel() == CodeModel::Tiny) {
+ I.setDesc(TII.get(AArch64::ADR));
+ I.getOperand(1).setTargetFlags(OpFlags);
} else {
I.setDesc(TII.get(AArch64::MOVaddr));
I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
index 04bb90d30d6..be655e3482c 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -204,7 +204,9 @@ AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
// The small code model's direct accesses use ADRP, which cannot
// necessarily produce the value 0 (if the code is above 4GB).
- if (useSmallAddressing() && GV->hasExternalWeakLinkage())
+ // The same applies to the tiny code model, where we have a pc-relative LDR.
+ if ((useSmallAddressing() || TM.getCodeModel() == CodeModel::Tiny) &&
+ GV->hasExternalWeakLinkage())
return AArch64II::MO_GOT | Flags;
return Flags;
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 120d71381c6..c4b9b45f67b 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -210,14 +210,16 @@ static CodeModel::Model getEffectiveCodeModel(const Triple &TT,
Optional<CodeModel::Model> CM,
bool JIT) {
if (CM) {
- if (*CM != CodeModel::Small && *CM != CodeModel::Large) {
+ if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
+ *CM != CodeModel::Large) {
if (!TT.isOSFuchsia())
report_fatal_error(
- "Only small and large code models are allowed on AArch64");
- else if (CM != CodeModel::Kernel)
- report_fatal_error(
- "Only small, kernel, and large code models are allowed on AArch64");
- }
+ "Only small, tiny and large code models are allowed on AArch64");
+ else if (*CM != CodeModel::Kernel)
+ report_fatal_error("Only small, tiny, kernel, and large code models "
+ "are allowed on AArch64");
+ } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
+ report_fatal_error("tiny code model is only supported on ELF");
return *CM;
}
// The default MCJIT memory managers make no guarantees about where they can
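With the check above in place, the tiny model can be requested in the usual way, e.g. `llc -mtriple=aarch64-linux-gnu -code-model=tiny` (a hedged usage note: the triple is just an example; per the new report_fatal_error path, non-ELF triples are rejected).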
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 30a9a08f234..e18498c8a4d 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -2453,17 +2453,34 @@ AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
SMLoc S = getLoc();
const MCExpr *Expr;
- const AsmToken &Tok = getParser().getTok();
- if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
- if (getParser().parseExpression(Expr))
- return MatchOperand_ParseFail;
+ // Leave anything with a bracket to the default for SVE
+ if (getParser().getTok().is(AsmToken::LBrac))
+ return MatchOperand_NoMatch;
- SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
- Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+ if (getParser().getTok().is(AsmToken::Hash))
+ getParser().Lex(); // Eat hash token.
- return MatchOperand_Success;
+ if (parseSymbolicImmVal(Expr))
+ return MatchOperand_ParseFail;
+
+ AArch64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
+ ELFRefKind == AArch64MCExpr::VK_INVALID) {
+ // No modifier was specified at all; this is the syntax for an ELF basic
+ // ADR relocation (unfortunately).
+ Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
+ } else {
+ Error(S, "unexpected adr label");
+ return MatchOperand_ParseFail;
+ }
}
- return MatchOperand_NoMatch;
+
+ SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
+ Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
+ return MatchOperand_Success;
}
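A hedged sketch of the operand forms the rewritten tryParseAdrLabel accepts (labels hypothetical); expressions carrying an explicit relocation modifier are now rejected with "unexpected adr label":

    adr x0, lbl      // plain label: parsed as a basic ELF ADR relocation
    adr x1, #1020    // immediate offset: still parsed as an expression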
/// tryParseFPImm - A floating point immediate expression operand.
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 85694655519..37b92d96bf5 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -10,6 +10,7 @@
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -376,6 +377,14 @@ bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
// to the linker -- a relocation!
if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
return true;
+
+ AArch64MCExpr::VariantKind RefKind =
+ static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+ AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
+ // GOT entry loads via a pc-relative LDR also need a relocation
+ if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
+ SymLoc == AArch64MCExpr::VK_GOT)
+ return true;
return false;
}
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 07a35defca7..2ccd7cef8be 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -138,7 +138,9 @@ unsigned AArch64ELFObjectWriter::getRelocType(MCContext &Ctx,
} else
return ELF::R_AARCH64_PREL64;
case AArch64::fixup_aarch64_pcrel_adr_imm21:
- assert(SymLoc == AArch64MCExpr::VK_NONE && "unexpected ADR relocation");
+ if (SymLoc != AArch64MCExpr::VK_ABS)
+ Ctx.reportError(Fixup.getLoc(),
+ "invalid symbol kind for ADR relocation");
return R_CLS(ADR_PREL_LO21);
case AArch64::fixup_aarch64_pcrel_adrp_imm21:
if (SymLoc == AArch64MCExpr::VK_ABS && !IsNC)
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
index cd937935ddb..2d9ccd58089 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -62,8 +62,10 @@ StringRef AArch64MCExpr::getVariantKindName() const {
case VK_TLSDESC_LO12: return ":tlsdesc_lo12:";
case VK_ABS_PAGE: return "";
case VK_ABS_PAGE_NC: return ":pg_hi21_nc:";
+ case VK_GOT: return ":got:";
case VK_GOT_PAGE: return ":got:";
case VK_GOT_LO12: return ":got_lo12:";
+ case VK_GOTTPREL: return ":gottprel:";
case VK_GOTTPREL_PAGE: return ":gottprel:";
case VK_GOTTPREL_LO12_NC: return ":gottprel_lo12:";
case VK_GOTTPREL_G1: return ":gottprel_g1:";