author    Juergen Ributzka <juergen@apple.com>  2014-09-03 20:56:59 +0000
committer Juergen Ributzka <juergen@apple.com>  2014-09-03 20:56:59 +0000
commit    88e32517c4b190df960a9f281f5c958c4143df15 (patch)
tree      d2dcae44dcc09517d686fd07944f4de6fc435d04 /llvm/lib/Target/X86/X86FastISel.cpp
parent    5b8bb4d7dd0aa514b8cd6c7cf89364330c5fa2d5 (diff)
[FastISel][tblgen] Rename tblgen generated FastISel functions. NFC.
This is the final round of renaming. This changes tblgen to emit lower-case function names for FastEmitInst_* and FastEmit_*, and updates all its uses in the source code.

Reviewed by Eric

llvm-svn: 217075
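For readers skimming the diff below, here is a minimal, self-contained C++ sketch of the naming convention the commit adopts. The ToyFastISel class, its integer parameter types, and its method bodies are invented stand-ins for illustration only; the real tblgen-generated helpers take MVT/ISD::NodeType/register arguments, as shown in the hunks that follow.

    #include <cstdio>

    using Register = unsigned;

    struct ToyFastISel {
      // Before this commit the tblgen-generated helpers were spelled
      // FastEmit_* / FastEmitInst_*; afterwards the leading 'fast' is lower-case.
      Register fastEmit_r(int SrcVT, int DstVT, int Opcode, Register Op0,
                          bool Op0IsKill) {
        // Stand-in body: the real helper dispatches into a tblgen-generated
        // table keyed on (Opcode, SrcVT, DstVT) and emits a machine instruction.
        (void)SrcVT; (void)DstVT; (void)Opcode; (void)Op0IsKill;
        return Op0 + 1; // pretend a fresh virtual register was created
      }

      Register fastEmitInst_rr(unsigned MachineOpc, Register Op0, bool Op0IsKill,
                               Register Op1, bool Op1IsKill) {
        (void)MachineOpc; (void)Op0IsKill; (void)Op1IsKill;
        return Op0 + Op1; // stand-in for emitting a two-register instruction
      }
    };

    int main() {
      ToyFastISel ISel;
      // Call sites use the post-rename, lower-case spelling, mirroring the '+'
      // lines in the diff below.
      Register R = ISel.fastEmit_r(/*SrcVT=*/1, /*DstVT=*/2, /*Opcode=*/0,
                                   /*Op0=*/5, /*Kill=*/false);
      Register S = ISel.fastEmitInst_rr(/*MachineOpc=*/0, R, /*Kill=*/true,
                                        R, /*Kill=*/true);
      std::printf("R=%u S=%u\n", R, S);
      return 0;
    }

The sketch only illustrates the spelling change; it does not reproduce FastISel's behavior.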
Diffstat (limited to 'llvm/lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 50
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index de11b8c0445..72c50ba2e70 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -532,7 +532,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT,
unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
Src, /*TODO: Kill=*/false);
if (RR == 0)
return false;
@@ -1043,7 +1043,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
}
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
ISD::SIGN_EXTEND;
- SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
+ SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
SrcReg, /*TODO: Kill=*/false);
}
@@ -1196,7 +1196,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
ResultReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
ResultReg);
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
return false;
@@ -1314,7 +1314,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
ResultReg)
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
} else if (DstVT != MVT::i8) {
- ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
+ ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
ResultReg, /*Kill=*/true);
if (ResultReg == 0)
return false;
@@ -1714,7 +1714,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
// Now reference the 8-bit subreg of the result.
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
/*Kill=*/true, X86::sub_8bit);
}
// Copy the result out of the physreg if we haven't already.
@@ -1839,7 +1839,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
return false;
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
updateValueMap(I, ResultReg);
return true;
@@ -1919,13 +1919,13 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
return false;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
- unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+ unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
LHSReg, LHSIsKill);
- unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+ unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
RHSReg, RHSIsKill);
- unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
AndReg, /*IsKill=*/true);
updateValueMap(I, ResultReg);
return true;
@@ -1990,7 +1990,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
unsigned ResultReg =
- FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+ fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg);
return true;
}
@@ -2115,7 +2115,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
}
// Issue an extract_subreg.
- unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+ unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
InputReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
@@ -2307,7 +2307,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
+ // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
static const unsigned SqrtOpc[2][2] = {
@@ -2425,7 +2425,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill));
} else
- ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
CI->getZExtValue());
}
@@ -2436,7 +2436,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (RHSReg == 0)
return false;
RHSIsKill = hasTrivialKill(RHS);
- ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+ ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
RHSIsKill);
}
@@ -2451,7 +2451,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
static const unsigned MULOpc[] =
@@ -2462,10 +2462,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+ ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
RHSIsKill);
} else
- ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
}
@@ -2744,7 +2744,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
return false;
ResultReg =
- FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
+ fastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
if (!ResultReg)
return false;
@@ -2831,7 +2831,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
break;
}
case CCValAssign::BCvt: {
- ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+ ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
/*TODO: Kill=*/false);
assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
@@ -3108,15 +3108,15 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
uint64_t Imm = CI->getZExtValue();
if (Imm == 0) {
- unsigned SrcReg = FastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+ unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
case MVT::i8:
- return FastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
X86::sub_8bit);
case MVT::i16:
- return FastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
X86::sub_16bit);
case MVT::i32:
return SrcReg;
@@ -3148,14 +3148,14 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
}
}
if (VT == MVT::i64 && Opc == X86::MOV32ri) {
- unsigned SrcReg = FastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+ unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
unsigned ResultReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
.addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
return ResultReg;
}
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {