author:    Chris Lattner <sabre@nondot.org>  2009-06-27 04:16:01 +0000
committer: Chris Lattner <sabre@nondot.org>  2009-06-27 04:16:01 +0000
commit:    fea81da433b4540df545692ff9223213fe18d594 (patch)
tree:      5a0d9624120a18156f1cf319fa9c3cb67b895fed /llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
parent:    df92e147c925b8c11c913f51b238f67bd99fa374 (diff)
Reimplement rip-relative addressing in the X86-64 backend. The new implementation primarily differs from the former in that the asmprinter doesn't make a zillion decisions about whether or not something will be RIP relative. Instead, those decisions are made by isel lowering and propagated through to the asm printer. To achieve this, we:

1. Represent RIP relative addresses by setting the base of the X86 addr mode to X86::RIP.
2. When ISel Lowering decides that it is safe to use RIP, it lowers to X86ISD::WrapperRIP. When it is unsafe to use RIP, it lowers to X86ISD::Wrapper as before.
3. This removes isRIPRel from X86ISelAddressMode, representing it with a basereg of RIP instead.
4. The addressing mode matching logic in isel is greatly simplified.
5. The asmprinter is greatly simplified, notably the "NotRIPRel" predicate passed through various printoperand routines is gone now.
6. The various symbol printing routines in asmprinter now no longer infer when to emit (%rip); they just print the symbol.

I think this is a big improvement over the previous situation. It does have two small caveats though:

1. I implemented a horrible "no-rip" modifier for the inline asm "P" constraint modifier. This is a short-term hack; there is a much better, but more involved, solution.
2. I had to xfail an -aggressive-remat testcase because it isn't handling the use of RIP in the constant-pool-reading instruction. This specific test is easy to fix without -aggressive-remat, which I intend to do next.

llvm-svn: 74372
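For readers who want the lowering decision in point 2 made concrete, the following is a minimal, hypothetical sketch only. It is not the code from X86ISelLowering.cpp: the function name is invented, the signatures are approximations for this era of the tree, and the single code-model check stands in for the real logic, which also has to weigh TLS models and PIC styles.

// Hypothetical sketch of the Wrapper vs. WrapperRIP choice described in
// the commit message. All names and signatures here are simplified
// stand-ins, not the actual X86ISelLowering.cpp code.
SDValue LowerSymbolSketch(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                          const X86Subtarget *Subtarget,
                          const TargetMachine &TM, MVT PtrVT) {
  SDValue Sym = DAG.getTargetGlobalAddress(GA->getGlobal(), PtrVT,
                                           GA->getOffset());
  // If the symbol is known to be reachable through a 32-bit displacement
  // from %rip (x86-64, small code model), pick WrapperRIP; isel will then
  // install X86::RIP as the base register of the addressing mode.
  if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
    return DAG.getNode(X86ISD::WrapperRIP, PtrVT, Sym);
  // Otherwise fall back to the plain Wrapper node, exactly as before
  // this patch.
  return DAG.getNode(X86ISD::Wrapper, PtrVT, Sym);
}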
Diffstat (limited to 'llvm/lib/Target/X86/X86ISelDAGToDAG.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 173
1 file changed, 105 insertions(+), 68 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 2efe3a2ba7b..1336177de24 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -65,7 +65,6 @@ namespace {
int FrameIndex;
} Base;
- bool isRIPRel; // RIP as base?
unsigned Scale;
SDValue IndexReg;
int32_t Disp;
@@ -78,13 +77,32 @@ namespace {
unsigned char SymbolFlags; // X86II::MO_*
X86ISelAddressMode()
- : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
+ : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0), SymbolFlags(0) {
}
bool hasSymbolicDisplacement() const {
return GV != 0 || CP != 0 || ES != 0 || JT != -1;
}
+
+ bool hasBaseOrIndexReg() const {
+ return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
+ }
+
+ /// isRIPRelative - Return true if this addressing mode is already RIP
+ /// relative.
+ bool isRIPRelative() const {
+ if (BaseType != RegBase) return false;
+ if (RegisterSDNode *RegNode =
+ dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
+ return RegNode->getReg() == X86::RIP;
+ return false;
+ }
+
+ void setBaseReg(SDValue Reg) {
+ BaseType = RegBase;
+ Base.Reg = Reg;
+ }
void dump() {
cerr << "X86ISelAddressMode " << this << "\n";
@@ -92,7 +110,7 @@ namespace {
if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
else cerr << "nul";
cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
- cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
+ cerr << " Scale" << Scale << "\n";
cerr << "IndexReg ";
if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
else cerr << "nul";
@@ -685,65 +703,80 @@ bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
return true;
}
+/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
+/// into an addressing mode. These wrap things that will resolve down into a
+/// symbol reference. If no match is possible, this returns true, otherwise it
+/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
- bool SymbolicAddressesAreRIPRel =
- getTargetMachine().symbolicAddressesAreRIPRel();
- bool is64Bit = Subtarget->is64Bit();
- DOUT << "Wrapper: 64bit " << is64Bit;
- DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
-
- // Under X86-64 non-small code model, GV (and friends) are 64-bits.
- if (is64Bit && (TM.getCodeModel() != CodeModel::Small))
- return true;
-
- // Base and index reg must be 0 in order to use rip as base.
- bool canUsePICRel = !AM.Base.Reg.getNode() && !AM.IndexReg.getNode();
- if (is64Bit && !canUsePICRel && SymbolicAddressesAreRIPRel)
- return true;
-
+ // If the addressing mode already has a symbol as the displacement, we can
+ // never match another symbol.
if (AM.hasSymbolicDisplacement())
return true;
- // If value is available in a register both base and index components have
- // been picked, we can't fit the result available in the register in the
- // addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
SDValue N0 = N.getOperand(0);
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
- uint64_t Offset = G->getOffset();
- if (!is64Bit || isInt32(AM.Disp + Offset)) {
- GlobalValue *GV = G->getGlobal();
- bool isRIPRel = SymbolicAddressesAreRIPRel;
- if (N0.getOpcode() == llvm::ISD::TargetGlobalTLSAddress) {
- TLSModel::Model model =
- getTLSModel (GV, TM.getRelocationModel());
- if (is64Bit && model == TLSModel::InitialExec)
- isRIPRel = true;
- }
- AM.GV = GV;
- AM.Disp += Offset;
- AM.isRIPRel = isRIPRel;
+
+ // Handle X86-64 rip-relative addresses. We check this before checking direct
+ // folding because RIP is preferable to non-RIP accesses.
+ if (Subtarget->is64Bit() &&
+ // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
+ // they cannot be folded into immediate fields.
+ // FIXME: This can be improved for kernel and other models?
+ TM.getCodeModel() == CodeModel::Small &&
+
+ // Base and index reg must be 0 in order to use %rip as base and lowering
+ // must allow RIP.
+ !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
+
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ int64_t Offset = AM.Disp + G->getOffset();
+ if (!isInt32(Offset)) return true;
+ AM.GV = G->getGlobal();
+ AM.Disp = Offset;
AM.SymbolFlags = G->getTargetFlags();
- return false;
- }
- } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
- uint64_t Offset = CP->getOffset();
- if (!is64Bit || isInt32(AM.Disp + Offset)) {
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ int64_t Offset = AM.Disp + CP->getOffset();
+ if (!isInt32(Offset)) return true;
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
- AM.Disp += Offset;
- AM.isRIPRel = SymbolicAddressesAreRIPRel;
+ AM.Disp = Offset;
AM.SymbolFlags = CP->getTargetFlags();
- return false;
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+ AM.ES = S->getSymbol();
+ AM.SymbolFlags = S->getTargetFlags();
+ } else {
+ JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
+ AM.JT = J->getIndex();
+ AM.SymbolFlags = J->getTargetFlags();
}
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
- AM.ES = S->getSymbol();
- AM.isRIPRel = SymbolicAddressesAreRIPRel;
- AM.SymbolFlags = S->getTargetFlags();
+
+ if (N.getOpcode() == X86ISD::WrapperRIP)
+ AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
return false;
- } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
- AM.JT = J->getIndex();
- AM.isRIPRel = SymbolicAddressesAreRIPRel;
- AM.SymbolFlags = J->getTargetFlags();
+ }
+
+ // Handle the case when globals fit in our immediate field: This is true for
+ // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
+ // mode, this results in a non-RIP-relative computation.
+ if (!Subtarget->is64Bit() ||
+ (TM.getCodeModel() == CodeModel::Small &&
+ TM.getRelocationModel() == Reloc::Static)) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ AM.GV = G->getGlobal();
+ AM.Disp += G->getOffset();
+ AM.SymbolFlags = G->getTargetFlags();
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ AM.CP = CP->getConstVal();
+ AM.Align = CP->getAlignment();
+ AM.Disp += CP->getOffset();
+ AM.SymbolFlags = CP->getTargetFlags();
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+ AM.ES = S->getSymbol();
+ AM.SymbolFlags = S->getTargetFlags();
+ } else {
+ JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
+ AM.JT = J->getIndex();
+ AM.SymbolFlags = J->getTargetFlags();
+ }
return false;
}
@@ -762,12 +795,19 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
if (Depth > 5)
return MatchAddressBase(N, AM);
+ // If this is already a %rip relative address, we can only merge immediates
+ // into it. Instead of handling this in every case, we handle it here.
// RIP relative addressing: %rip + 32-bit displacement!
- if (AM.isRIPRel) {
- if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
- uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
- if (!is64Bit || isInt32(AM.Disp + Val)) {
- AM.Disp += Val;
+ if (AM.isRIPRelative()) {
+ // FIXME: JumpTable and ExternalSymbol address currently don't like
+ // displacements. It isn't very important, but this should be fixed for
+ // consistency.
+ if (!AM.ES && AM.JT != -1) return true;
+
+ if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
+ int64_t Val = AM.Disp + Cst->getSExtValue();
+ if (isInt32(Val)) {
+ AM.Disp = Val;
return false;
}
}
@@ -791,6 +831,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break;
case X86ISD::Wrapper:
+ case X86ISD::WrapperRIP:
if (!MatchWrapper(N, AM))
return false;
break;
@@ -810,7 +851,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break;
case ISD::SHL:
- if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
+ if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
break;
if (ConstantSDNode
@@ -851,8 +892,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// X*[3,5,9] -> X+X*[2,4,8]
if (AM.BaseType == X86ISelAddressMode::RegBase &&
AM.Base.Reg.getNode() == 0 &&
- AM.IndexReg.getNode() == 0 &&
- !AM.isRIPRel) {
+ AM.IndexReg.getNode() == 0) {
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
@@ -901,7 +941,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break;
}
// Test if the index field is free for use.
- if (AM.IndexReg.getNode() || AM.isRIPRel) {
+ if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
AM = Backup;
break;
}
@@ -972,8 +1012,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// the add.
if (AM.BaseType == X86ISelAddressMode::RegBase &&
!AM.Base.Reg.getNode() &&
- !AM.IndexReg.getNode() &&
- !AM.isRIPRel) {
+ !AM.IndexReg.getNode()) {
AM.Base.Reg = N.getNode()->getOperand(0);
AM.IndexReg = N.getNode()->getOperand(1);
AM.Scale = 1;
@@ -1012,9 +1051,6 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// Scale must not be used already.
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
- // Not when RIP is used as the base.
- if (AM.isRIPRel) break;
-
SDValue X = Shift.getOperand(0);
ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
@@ -1136,7 +1172,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Is the base register already occupied?
if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
// If so, check to see if the scale index register is set.
- if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
+ if (AM.IndexReg.getNode() == 0) {
AM.IndexReg = N;
AM.Scale = 1;
return false;
@@ -1163,7 +1199,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
if (AvoidDupAddrCompute && !N.hasOneUse()) {
unsigned Opcode = N.getOpcode();
if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
- Opcode != X86ISD::Wrapper) {
+ Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) {
// If we are able to fold N into addressing mode, then we'll allow it even
// if N has multiple uses. In general, addressing computation is used as
// addresses by all of its uses. But watch out for CopyToReg uses, that
@@ -1694,7 +1730,8 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
// If N2 is not Wrapper(decriptor) then the llvm.declare is mangled
// somehow, just ignore it.
- if (N2.getOpcode() != X86ISD::Wrapper) {
+ if (N2.getOpcode() != X86ISD::Wrapper &&
+ N2.getOpcode() != X86ISD::WrapperRIP) {
ReplaceUses(N.getValue(0), Chain);
return NULL;
}
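To summarize the representational change the patch makes, here is a small standalone toy model. This is not LLVM code; every name in it is invented for illustration. It captures the two ideas the diff encodes: RIP-relativity is derived from the base register rather than tracked in a separate isRIPRel flag, and a RIP-based mode only accepts further folding of displacements that still fit in 32 bits.

// Standalone toy model (not LLVM code) of the representational change.
#include <cstdint>

enum Register : unsigned { NoReg = 0, RAX = 1, RIP = 2 /* ... */ };

struct AddrMode {
  Register Base = NoReg;
  Register Index = NoReg;
  unsigned Scale = 1;
  int32_t Disp = 0;

  // Mirrors X86ISelAddressMode::isRIPRelative() in the patch: the mode is
  // RIP-relative iff the base register is RIP, so there is no redundant
  // boolean to keep in sync with the base register.
  bool isRIPRelative() const { return Base == RIP; }

  // Mirrors the matcher's rule for RIP-based modes: a constant may only be
  // merged if the combined displacement still fits in a signed 32 bits.
  bool tryFoldDisp(int64_t Val) {
    int64_t NewDisp = int64_t(Disp) + Val;
    if (NewDisp != int64_t(int32_t(NewDisp)))
      return false; // would overflow the 32-bit displacement field
    Disp = int32_t(NewDisp);
    return true;
  }
};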