Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp  66
-rw-r--r--  llvm/lib/Target/ARM/ARMFastISel.cpp          30
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp         2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCFastISel.cpp      22
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp          50
5 files changed, 85 insertions, 85 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index f7d9af2f76a..b6f3773d65e 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -281,7 +281,7 @@ unsigned AArch64FastISel::AArch64MaterializeInt(const ConstantInt *CI, MVT VT) {
return 0;
if (!CI->isZero())
- return FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
// Create a copy from the zero register to materialize a "0" value.
const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
@@ -311,7 +311,7 @@ unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) {
Is64Bit ? AArch64_AM::getFP64Imm(Val) : AArch64_AM::getFP32Imm(Val);
assert((Imm != -1) && "Cannot encode floating-point constant.");
unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
// Materialize via constant pool. MachineConstantPool wants an explicit
@@ -411,7 +411,7 @@ unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
bool Is64Bit = (VT == MVT::f64);
unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
- return FastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
+ return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
}
// Computes the address to get to an object.
@@ -796,10 +796,10 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
if (ImmediateOffsetNeedsLowering) {
unsigned ResultReg = 0;
if (Addr.getReg())
- ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
+ ResultReg = fastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(),
/*IsKill=*/false, Offset, MVT::i64);
else
- ResultReg = FastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
+ ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
if (!ResultReg)
return false;
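
This hunk is the address-legalization path: once a load/store's immediate offset can no longer be encoded, the offset is folded into the base register up front. A minimal sketch of that branch, with emitAdd and materializeConst as hypothetical stand-ins for fastEmit_ri_ and fastEmit_i:

    #include <cstdint>

    // Hypothetical stand-ins for fastEmit_ri_ / fastEmit_i; a real
    // implementation would create and return a fresh virtual register.
    unsigned emitAdd(unsigned /*BaseReg*/, int64_t /*Imm*/) { return 1; }
    unsigned materializeConst(int64_t /*Imm*/) { return 2; }

    struct Address { unsigned Reg = 0; int64_t Offset = 0; };

    // Mirrors the if/else above: ADD when a base register exists,
    // a bare constant otherwise; 0 means emission failed.
    bool lowerUnencodableOffset(Address &Addr) {
      unsigned ResultReg =
          Addr.Reg ? emitAdd(Addr.Reg, Addr.Offset)
                   : materializeConst(Addr.Offset);
      if (!ResultReg)
        return false;
      Addr.Reg = ResultReg;   // the address is now register-only
      Addr.Offset = 0;
      return true;
    }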
@@ -1246,7 +1246,7 @@ unsigned AArch64FastISel::emitAND_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
return 0;
- return FastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
+ return fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
}
@@ -1571,7 +1571,7 @@ bool AArch64FastISel::SelectBranch(const Instruction *I) {
// Issue an extract_subreg to get the lower 32-bits.
if (SrcVT == MVT::i64) {
- CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
+ CondReg = fastEmitInst_extractsubreg(MVT::i32, CondReg, CondIsKill,
AArch64::sub_32);
CondIsKill = true;
}
@@ -1764,7 +1764,7 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
if (!TrueReg || !FalseReg)
return false;
- unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
+ unsigned ResultReg = fastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
FalseReg, FalseIsKill, CC);
updateValueMap(I, ResultReg);
return true;
@@ -1872,7 +1872,7 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
+ unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
SrcIsKill);
updateValueMap(I, ResultReg);
return true;
@@ -2368,7 +2368,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
unsigned DestReg;
unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
while (Depth--) {
- DestReg = FastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
+ DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
SrcReg, /*IsKill=*/true, 0);
assert(DestReg && "Unexpected LDR instruction emission failure.");
SrcReg = DestReg;
@@ -2446,7 +2446,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return false;
bool Op0IsKill = hasTrivialKill(II->getOperand(0));
- unsigned ResultReg = FastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
@@ -2514,16 +2514,16 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
MulReg = Emit_SMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
/*IsKill=*/false, 32);
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
- ShiftReg = FastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
+ ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
AArch64::sub_32);
emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 31, /*WantResult=*/false);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned SMULHReg = FastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
+ unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
AArch64_AM::ASR, 63, /*WantResult=*/false);
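
For the 32-bit smul.with.overflow path above, the product is widened with SMULL and overflow is detected by checking that the high 32 bits equal the sign-extension of the low 32 bits — the SUBS against an ASR #31 shifted operand. The same test as ordinary C++ (a model, not LLVM code; the >> 31 assumes an arithmetic shift, matching ASR):

    #include <cstdint>

    bool smulOverflows(int32_t A, int32_t B) {
      int64_t P = (int64_t)A * (int64_t)B;   // SMULL: full 64-bit product
      int32_t Lo = (int32_t)P;               // sub_32 of the product
      int32_t Hi = (int32_t)(P >> 32);       // the LSR #32 extract
      return Hi != (Lo >> 31);               // SUBS ..., ASR #31 comparison
    }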
@@ -2547,12 +2547,12 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
/*IsKill=*/false, AArch64_AM::LSR, 32,
/*WantResult=*/false);
- MulReg = FastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
+ MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
AArch64::sub_32);
} else {
assert(VT == MVT::i64 && "Unexpected value type.");
MulReg = Emit_MUL_rr(VT, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
- unsigned UMULHReg = FastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
+ unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
/*IsKill=*/false, /*WantResult=*/false);
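
The unsigned case is simpler: after UMULL (or UMULH for i64), overflow occurred exactly when the high half of the widened product is nonzero, which the SUBS-against-XZR compares test. Modeled:

    #include <cstdint>

    bool umulOverflows(uint32_t A, uint32_t B) {
      uint64_t P = (uint64_t)A * (uint64_t)B;   // UMULL
      return (P >> 32) != 0;                    // high half nonzero => overflow
    }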
@@ -2567,7 +2567,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
}
- ResultReg2 = FastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
+ ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
/*IsKill=*/true, getInvertedCondCode(CC));
assert((ResultReg1 + 1) == ResultReg2 &&
@@ -2718,7 +2718,7 @@ bool AArch64FastISel::SelectTrunc(const Instruction *I) {
break;
}
// Issue an extract_subreg to get the lower 32-bits.
- unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
+ unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
AArch64::sub_32);
// Create the AND instruction which performs the actual truncation.
ResultReg = emitAND_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask);
@@ -2762,7 +2762,7 @@ unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
// FIXME: We're SExt i1 to i64.
return 0;
}
- return FastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
+ return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
/*TODO:IsKill=*/false, 0, 0);
}
}
@@ -2783,7 +2783,7 @@ unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
const TargetRegisterClass *RC =
(RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- return FastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
+ return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
ZReg, /*IsKill=*/true);
}
@@ -2792,7 +2792,7 @@ unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
if (RetVT != MVT::i64)
return 0;
- return FastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
@@ -2802,7 +2802,7 @@ unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
if (RetVT != MVT::i64)
return 0;
- return FastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
+ return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
Op0, Op0IsKill, Op1, Op1IsKill,
AArch64::XZR, /*IsKill=*/true);
}
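
All three multiply helpers above pass XZR as the final source operand: on AArch64, MUL/SMULL/UMULL are aliases for MADD/SMADDL/UMADDL with the zero register as addend, which is why the helpers emit the *MADDLrrr opcodes. The identity being relied on, sketched:

    #include <cstdint>

    // MADD(a, b, c) = a * b + c, so an addend of XZR yields a plain multiply.
    int64_t madd(int64_t A, int64_t B, int64_t Addend) { return A * B + Addend; }
    int64_t mul(int64_t A, int64_t B) { return madd(A, B, /*XZR*/ 0); }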
@@ -2826,7 +2826,7 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -2897,7 +2897,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
@@ -2920,7 +2920,7 @@ unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -3006,7 +3006,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
@@ -3029,7 +3029,7 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
Op1Reg = emitAND_ri(MVT::i32, Op1Reg, Op1IsKill, Mask);
Op0IsKill = Op1IsKill = true;
}
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg,
Op1IsKill);
if (NeedTrunc)
ResultReg = emitAND_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
@@ -3103,7 +3103,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
Op0 = TmpReg;
Op0IsKill = true;
}
- return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
+ return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
@@ -3164,7 +3164,7 @@ unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- return FastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
+ return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}
bool AArch64FastISel::SelectIntExt(const Instruction *I) {
@@ -3250,12 +3250,12 @@ bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned QuotReg = FastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
+ unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
Src1Reg, /*IsKill=*/false);
assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
Src1Reg, Src1IsKill, Src0Reg,
Src0IsKill);
updateValueMap(I, ResultReg);
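
The comment above gives the identity the MSUB lowering implements: remainder = numerator - quotient * denominator. A quick check of that identity as ordinary C++ (a model, not LLVM code):

    #include <cassert>
    #include <cstdint>

    int64_t remViaMsub(int64_t Num, int64_t Den) {
      int64_t Quot = Num / Den;   // the SDIV/UDIV emitted first
      return Num - Quot * Den;    // the MSUB: Num - (Quot * Den)
    }

    int main() {
      assert(remViaMsub(7, 3) == 7 % 3);     // 1
      assert(remViaMsub(-7, 3) == -7 % 3);   // -1, matching C++'s truncation
    }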
@@ -3407,7 +3407,7 @@ bool AArch64FastISel::SelectBitCast(const Instruction *I) {
if (!Op0Reg)
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
- unsigned ResultReg = FastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
+ unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
@@ -3483,7 +3483,7 @@ bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
return SelectIndirectBr(I);
case Instruction::Unreachable:
if (TM.Options.TrapUnreachable)
- return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
+ return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
else
return true;
case Instruction::Alloca:
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 3c85f08437c..a5f635e3cda 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -105,28 +105,28 @@ class ARMFastISel final : public FastISel {
// Code from FastISel.cpp.
private:
- unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill);
- unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
- unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
- unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm);
- unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
uint64_t Imm);
- unsigned FastEmitInst_i(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm);
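
The declarations above (note the "Code from FastISel.cpp" comment) show why ARM carries private copies of these helpers: each copy reproduces the base-class emitter but routes the built instruction through AddOptionalDefs so ARM's optional predicate and cc-out operands are appended. A structural sketch with stand-in types, not LLVM's API:

    // Stand-in for llvm::MachineInstrBuilder.
    struct MachineInstrBuilder {};

    // Plays AddOptionalDefs' role: append pred:$p (and s_cc_out) operands.
    MachineInstrBuilder &AddOptionalDefs(MachineInstrBuilder &MIB) { return MIB; }

    // The shadowed helpers differ from FastISel's only by this wrapping step.
    unsigned emitWrapped(MachineInstrBuilder MIB, unsigned ResultReg) {
      AddOptionalDefs(MIB);
      return ResultReg;
    }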
@@ -285,7 +285,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
return MIB;
}
-unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill) {
unsigned ResultReg = createResultReg(RC);
@@ -307,7 +307,7 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
@@ -335,7 +335,7 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
@@ -367,7 +367,7 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
@@ -393,7 +393,7 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
@@ -423,7 +423,7 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
+unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
@@ -548,7 +548,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned ResultReg = 0;
if (Subtarget->useMovt(*FuncInfo.MF))
- ResultReg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
if (ResultReg)
return ResultReg;
@@ -909,7 +909,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
// Since the offset is too large for the load/store instruction
// get the reg+offset into a register.
if (needsLowering) {
- Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
+ Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
/*Op0IsKill*/false, Addr.Offset, MVT::i32);
Addr.Offset = 0;
}
@@ -1976,7 +1976,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
break;
}
case CCValAssign::BCvt: {
- unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
+ unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
/*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index c83fd3aeec9..9dde1d8e5e8 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -89,7 +89,7 @@ private:
// for some reason, this default is not generated by tablegen
// so we explicitly generate it here.
//
- unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+ unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill, uint64_t imm1,
uint64_t imm2, unsigned Op3, bool Op3IsKill) {
return 0;
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index b64215b773e..e62d4e16773 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -106,15 +106,15 @@ class PPCFastISel final : public FastISel {
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI) override;
bool fastLowerArguments() override;
- unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
- unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm);
- unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill);
- unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
@@ -2183,7 +2183,7 @@ bool PPCFastISel::fastLowerArguments() {
// Handle materializing integer constants into a register. This is not
// automatically generated for PowerPC, so must be explicitly created here.
-unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
+unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
if (Opc != ISD::Constant)
return 0;
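
Only the guard of fastEmit_i appears in this hunk; the elided body does the actual materialization. As a hedged illustration (an assumption, not the function's real code), the first question a PPC integer materializer asks is whether the value fits the signed 16-bit immediate of li/addi, since anything wider needs a lis/ori-style composition:

    #include <cstdint>

    // Does Imm fit the signed 16-bit field of li/addi? One instruction
    // suffices if so; otherwise compose with lis (+ ori, etc.).
    bool fitsLI(int64_t Imm) { return Imm >= INT16_MIN && Imm <= INT16_MAX; }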
@@ -2220,7 +2220,7 @@ unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
// assigning R0 or X0 to the output register for GPRC and G8RC
// register classes, as any such result could be used in ADDI, etc.,
// where those regs have another meaning.
-unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
uint64_t Imm) {
@@ -2233,27 +2233,27 @@ unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_ri(MachineInstOpcode, UseRC,
+ return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC,
Op0, Op0IsKill, Imm);
}
// Override for instructions with one register operand to avoid use of
// R0/X0. The automatic infrastructure isn't aware of the context so
// we must be conservative.
-unsigned PPCFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill) {
const TargetRegisterClass *UseRC =
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
+ return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
}
// Override for instructions with two register operands to avoid use
// of R0/X0. The automatic infrastructure isn't aware of the context
// so we must be conservative.
-unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
+unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass* RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill) {
@@ -2261,7 +2261,7 @@ unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
- return FastISel::FastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
+ return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
Op1, Op1IsKill);
}
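
All three PPC overrides above exist for the reason the comments give: in the addi family, a base register of r0 (or x0) is read as the literal value 0 rather than the register's contents, so any result that might later serve as such a base must come from a class excluding R0/X0. The hazard, modeled as plain C++:

    #include <cstdint>

    // addi rD, rA, Imm computes (rA == r0 ? 0 : contents of rA) + Imm,
    // which is why results feeding it must avoid r0/x0.
    int64_t addiSemantics(unsigned BaseRegNo, int64_t BaseVal, int64_t Imm) {
      return (BaseRegNo == 0 ? 0 : BaseVal) + Imm;
    }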
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index de11b8c0445..72c50ba2e70 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -532,7 +532,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT,
unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
Src, /*TODO: Kill=*/false);
if (RR == 0)
return false;
@@ -1043,7 +1043,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
}
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
ISD::SIGN_EXTEND;
- SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
+ SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
SrcReg, /*TODO: Kill=*/false);
}
@@ -1196,7 +1196,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
ResultReg = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
ResultReg);
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
return false;
@@ -1314,7 +1314,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
ResultReg)
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
} else if (DstVT != MVT::i8) {
- ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
+ ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
ResultReg, /*Kill=*/true);
if (ResultReg == 0)
return false;
@@ -1714,7 +1714,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
// Now reference the 8-bit subreg of the result.
- ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+ ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
/*Kill=*/true, X86::sub_8bit);
}
// Copy the result out of the physreg if we haven't already.
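
This 8-bit division hunk leans on an x86 fact: DIV r/m8 leaves the quotient in AL and the remainder in AH, and the shift-by-8 plus extractsubreg reads AH's contents without encoding AH directly. The arithmetic, modeled:

    #include <cstdint>

    // After DIV r/m8, AX holds quotient (AL) and remainder (AH).
    uint8_t remainderFromAX(uint16_t AX) {
      return (uint8_t)(AX >> 8);   // AH == high byte of AX
    }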
@@ -1839,7 +1839,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
return false;
unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
- unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+ unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
updateValueMap(I, ResultReg);
return true;
@@ -1919,13 +1919,13 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
return false;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
- unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+ unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
LHSReg, LHSIsKill);
- unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+ unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
RHSReg, RHSIsKill);
- unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+ unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
AndReg, /*IsKill=*/true);
updateValueMap(I, ResultReg);
return true;
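
The four instructions in Opc[0..3] above are the classic branchless SSE select: the compare produces an all-ones or all-zeros mask per lane, and (mask & LHS) | (~mask & RHS) then picks one side, with ANDN supplying the ~mask & RHS half in a single instruction. A scalar model of one lane:

    #include <cstdint>

    // CMPcc -> Mask (all ones or all zeros), then blend without a branch.
    uint32_t sseSelectLane(uint32_t Mask, uint32_t LHS, uint32_t RHS) {
      return (Mask & LHS) | (~Mask & RHS);   // the AND, ANDN, OR of the hunk
    }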
@@ -1990,7 +1990,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
unsigned ResultReg =
- FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+ fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg);
return true;
}
@@ -2115,7 +2115,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
}
// Issue an extract_subreg.
- unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+ unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
InputReg, /*Kill=*/true,
X86::sub_8bit);
if (!ResultReg)
@@ -2307,7 +2307,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
+ // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
static const unsigned SqrtOpc[2][2] = {
@@ -2425,7 +2425,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
TII.get(Opc[Is64Bit][IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill));
} else
- ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+ ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
CI->getZExtValue());
}
@@ -2436,7 +2436,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (RHSReg == 0)
return false;
RHSIsKill = hasTrivialKill(RHS);
- ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+ ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
RHSIsKill);
}
@@ -2451,7 +2451,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
static const unsigned MULOpc[] =
@@ -2462,10 +2462,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill));
- ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+ ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
RHSIsKill);
} else
- ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+ ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
}
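
In the 8-bit multiply branch above, the LHS is first copied into AL because (I)MUL r/m8 takes it implicitly — AX = AL * r/m8 — leaving only the RHS as an explicit operand of the emitted instruction. Its contract, modeled for the signed case:

    #include <cstdint>

    // IMUL r/m8 reads AL implicitly and writes the widened product to AX.
    int16_t imul8(int8_t AL, int8_t Src) {
      return (int16_t)AL * (int16_t)Src;   // product lands in AX
    }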
@@ -2744,7 +2744,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
return false;
ResultReg =
- FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
+ fastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
if (!ResultReg)
return false;
@@ -2831,7 +2831,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
break;
}
case CCValAssign::BCvt: {
- ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+ ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
/*TODO: Kill=*/false);
assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
@@ -3108,15 +3108,15 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
uint64_t Imm = CI->getZExtValue();
if (Imm == 0) {
- unsigned SrcReg = FastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+ unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
case MVT::i8:
- return FastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
X86::sub_8bit);
case MVT::i16:
- return FastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+ return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
X86::sub_16bit);
case MVT::i32:
return SrcReg;
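
The zero path above emits a single MOV32r0 (the 32-bit xor-zeroing idiom) and then serves every narrower type as a subregister view of that one def, since zeroing the 32-bit register zeroes all of its subregisters. Modeled:

    #include <cstdint>

    // One 32-bit zero def covers i1/i8/i16/i32; narrower values are just
    // subregister views (sub_8bit / sub_16bit) of the zeroed register.
    uint8_t  sub8(uint32_t R)  { return (uint8_t)R;  }
    uint16_t sub16(uint32_t R) { return (uint16_t)R; }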
@@ -3148,14 +3148,14 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
}
}
if (VT == MVT::i64 && Opc == X86::MOV32ri) {
- unsigned SrcReg = FastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+ unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
unsigned ResultReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
.addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
return ResultReg;
}
- return FastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+ return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
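
The i64 special case just above relies on x86-64's implicit zero extension: writing a 32-bit register clears bits 63:32, so a 64-bit constant with a zero high half can be built with the shorter MOV32ri and widened by a free SUBREG_TO_REG. The applicability test and the resulting value, sketched:

    #include <cstdint>

    bool fitsMov32ri(uint64_t Imm) { return (Imm >> 32) == 0; }

    // Writing the 32-bit subregister zero-extends into the full register.
    uint64_t afterSubregToReg(uint32_t Imm32) { return (uint64_t)Imm32; }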