path: root/llvm/lib
author		Daniel Sanders <daniel_l_sanders@apple.com>	2017-10-16 02:15:39 +0000
committer	Daniel Sanders <daniel_l_sanders@apple.com>	2017-10-16 02:15:39 +0000
commit		ce72d611af0114d0a6a39f06960d5b75e90a2dd0 (patch)
tree		79cb83a650cfe636d36b37ca109dcdc22894be4d /llvm/lib
parent		6735ea86cd5dd2b3abbce44a7542c40a588419f4 (diff)
download	bcm5719-llvm-ce72d611af0114d0a6a39f06960d5b75e90a2dd0.tar.gz
		bcm5719-llvm-ce72d611af0114d0a6a39f06960d5b75e90a2dd0.zip
Revert r315885: [globalisel][tblgen] Add support for iPTR and implement am_unscaled* and am_indexed*
MSVC doesn't like one of the constructors.

llvm-svn: 315886
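For context on what the reverted r315885 added, below is a minimal, self-contained C++ sketch of the renderer-list pattern used by the removed selectAddrModeUnscaled helper: the matcher either rejects the operand or returns a list of lambdas that each render one operand. The MachineInstrBuilderStub, RendererFn, and selectUnscaledStub names are illustrative stand-ins, not the real GlobalISel declarations (which use llvm::Optional and MachineInstrBuilder).

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <optional>
    #include <vector>

    // Illustrative stand-in for MachineInstrBuilder: just records added immediates.
    struct MachineInstrBuilderStub {
      std::vector<int64_t> Operands;
      void addImm(int64_t Imm) { Operands.push_back(Imm); }
    };

    using RendererFn = std::function<void(MachineInstrBuilderStub &)>;
    // The reverted helpers returned an optional list of such renderers
    // (llvm::Optional in the real code; std::optional here).
    using ComplexRendererFnStub = std::optional<std::vector<RendererFn>>;

    // Mirrors the shape of selectAddrModeUnscaled: reject offsets outside the
    // signed 9-bit window, otherwise hand back one renderer per operand.
    ComplexRendererFnStub selectUnscaledStub(int64_t Base, int64_t Offset) {
      if (Offset < -256 || Offset >= 256)
        return std::nullopt;
      std::vector<RendererFn> Renderers;
      Renderers.push_back([=](MachineInstrBuilderStub &MIB) { MIB.addImm(Base); });
      Renderers.push_back([=](MachineInstrBuilderStub &MIB) { MIB.addImm(Offset); });
      return Renderers;
    }

    int main() {
      MachineInstrBuilderStub MIB;
      if (auto Renderers = selectUnscaledStub(/*Base=*/0, /*Offset=*/-12))
        for (const RendererFn &Fn : *Renderers)
          Fn(MIB);
      std::cout << "rendered operands: " << MIB.Operands.size() << "\n"; // prints 2
    }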
Diffstat (limited to 'llvm/lib')
-rw-r--r--	llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp	| 18
-rw-r--r--	llvm/lib/Target/AArch64/AArch64InstrFormats.td	| 33
-rw-r--r--	llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp	| 125
3 files changed, 0 insertions, 176 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index 2a563c9bf5c..e4ee2bb7cf4 100644
--- a/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -18,7 +18,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -99,23 +98,6 @@ bool InstructionSelector::isOperandImmEqual(
return false;
}
-bool InstructionSelector::isBaseWithConstantOffset(
- const MachineOperand &Root, const MachineRegisterInfo &MRI) const {
- if (!Root.isReg())
- return false;
-
- MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
- if (RootI->getOpcode() != TargetOpcode::G_GEP)
- return false;
-
- MachineOperand &RHS = RootI->getOperand(2);
- MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg());
- if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT)
- return false;
-
- return true;
-}
-
bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI) const {
return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() &&
MI.implicit_operands().begin() == MI.implicit_operands().end();
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 1946d5a14dc..572e018da30 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -2516,22 +2516,6 @@ def am_indexed32 : ComplexPattern<i64, 2, "SelectAddrModeIndexed32", []>;
def am_indexed64 : ComplexPattern<i64, 2, "SelectAddrModeIndexed64", []>;
def am_indexed128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed128", []>;
-def gi_am_indexed8 :
- GIComplexOperandMatcher<s64, "selectAddrModeIndexed<8>">,
- GIComplexPatternEquiv<am_indexed8>;
-def gi_am_indexed16 :
- GIComplexOperandMatcher<s64, "selectAddrModeIndexed<16>">,
- GIComplexPatternEquiv<am_indexed16>;
-def gi_am_indexed32 :
- GIComplexOperandMatcher<s64, "selectAddrModeIndexed<32>">,
- GIComplexPatternEquiv<am_indexed32>;
-def gi_am_indexed64 :
- GIComplexOperandMatcher<s64, "selectAddrModeIndexed<64>">,
- GIComplexPatternEquiv<am_indexed64>;
-def gi_am_indexed128 :
- GIComplexOperandMatcher<s64, "selectAddrModeIndexed<128>">,
- GIComplexPatternEquiv<am_indexed128>;
-
class UImm12OffsetOperand<int Scale> : AsmOperandClass {
let Name = "UImm12Offset" # Scale;
let RenderMethod = "addUImm12OffsetOperands<" # Scale # ">";
@@ -3162,23 +3146,6 @@ def am_unscaled32 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled32", []>;
def am_unscaled64 : ComplexPattern<i64, 2, "SelectAddrModeUnscaled64", []>;
def am_unscaled128 :ComplexPattern<i64, 2, "SelectAddrModeUnscaled128", []>;
-def gi_am_unscaled8 :
- GIComplexOperandMatcher<s64, "selectAddrModeUnscaled8">,
- GIComplexPatternEquiv<am_unscaled8>;
-def gi_am_unscaled16 :
- GIComplexOperandMatcher<s64, "selectAddrModeUnscaled16">,
- GIComplexPatternEquiv<am_unscaled16>;
-def gi_am_unscaled32 :
- GIComplexOperandMatcher<s64, "selectAddrModeUnscaled32">,
- GIComplexPatternEquiv<am_unscaled32>;
-def gi_am_unscaled64 :
- GIComplexOperandMatcher<s64, "selectAddrModeUnscaled64">,
- GIComplexPatternEquiv<am_unscaled64>;
-def gi_am_unscaled128 :
- GIComplexOperandMatcher<s64, "selectAddrModeUnscaled128">,
- GIComplexPatternEquiv<am_unscaled128>;
-
-
class BaseLoadStoreUnscale<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
string asm, list<dag> pattern>
: I<oops, iops, asm, "\t$Rt, [$Rn, $offset]", "", pattern> {
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 622bf995147..ca93d1feaa6 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -66,32 +66,6 @@ private:
ComplexRendererFn selectArithImmed(MachineOperand &Root) const;
- ComplexRendererFn selectAddrModeUnscaled(MachineOperand &Root,
- unsigned Size) const;
-
- ComplexRendererFn selectAddrModeUnscaled8(MachineOperand &Root) const {
- return selectAddrModeUnscaled(Root, 1);
- }
- ComplexRendererFn selectAddrModeUnscaled16(MachineOperand &Root) const {
- return selectAddrModeUnscaled(Root, 2);
- }
- ComplexRendererFn selectAddrModeUnscaled32(MachineOperand &Root) const {
- return selectAddrModeUnscaled(Root, 4);
- }
- ComplexRendererFn selectAddrModeUnscaled64(MachineOperand &Root) const {
- return selectAddrModeUnscaled(Root, 8);
- }
- ComplexRendererFn selectAddrModeUnscaled128(MachineOperand &Root) const {
- return selectAddrModeUnscaled(Root, 16);
- }
-
- ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root,
- unsigned Size) const;
- template <int Width>
- ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root) const {
- return selectAddrModeIndexed(Root, Width / 8);
- }
-
const AArch64TargetMachine &TM;
const AArch64Subtarget &STI;
const AArch64InstrInfo &TII;
@@ -1418,105 +1392,6 @@ AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
}};
}
-/// Select a "register plus unscaled signed 9-bit immediate" address. This
-/// should only match when there is an offset that is not valid for a scaled
-/// immediate addressing mode. The "Size" argument is the size in bytes of the
-/// memory reference, which is needed here to know what is valid for a scaled
-/// immediate.
-InstructionSelector::ComplexRendererFn
-AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
- unsigned Size) const {
- MachineRegisterInfo &MRI =
- Root.getParent()->getParent()->getParent()->getRegInfo();
-
- if (!Root.isReg())
- return None;
-
- if (!isBaseWithConstantOffset(Root, MRI))
- return None;
-
- MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
- if (!RootDef)
- return None;
-
- MachineOperand &OffImm = RootDef->getOperand(2);
- if (!OffImm.isReg())
- return None;
- MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
- if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
- return None;
- int64_t RHSC;
- MachineOperand &RHSOp1 = RHS->getOperand(1);
- if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
- return None;
- RHSC = RHSOp1.getCImm()->getSExtValue();
-
- // If the offset is valid as a scaled immediate, don't match here.
- if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
- return None;
- if (RHSC >= -256 && RHSC < 256) {
- MachineOperand &Base = RootDef->getOperand(1);
- return {{
- [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
- [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
- }};
- }
- return None;
-}
-
-/// Select a "register plus scaled unsigned 12-bit immediate" address. The
-/// "Size" argument is the size in bytes of the memory reference, which
-/// determines the scale.
-InstructionSelector::ComplexRendererFn
-AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
- unsigned Size) const {
- MachineRegisterInfo &MRI =
- Root.getParent()->getParent()->getParent()->getRegInfo();
-
- if (!Root.isReg())
- return None;
-
- MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
- if (!RootDef)
- return None;
-
- if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
- return {{
- [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
- [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
- }};
- }
-
- if (isBaseWithConstantOffset(Root, MRI)) {
- MachineOperand &LHS = RootDef->getOperand(1);
- MachineOperand &RHS = RootDef->getOperand(2);
- MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
- MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
- if (LHSDef && RHSDef) {
- int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
- unsigned Scale = Log2_32(Size);
- if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
- if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
- LHSDef = MRI.getVRegDef(LHSDef->getOperand(1).getReg());
- return {{
- [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
- [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
- }};
- }
- }
- }
-
- // Before falling back to our general case, check if the unscaled
- // instructions can handle this. If so, that's preferable.
- if (selectAddrModeUnscaled(Root, Size).hasValue())
- return None;
-
- return {{
- [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
- [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
- }};
-}
-
namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,