Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp              275
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll       4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll          2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-abi.ll                     5
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll   2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll        2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll            2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll          6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll    2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll          2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll            2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll          2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll    8
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll     2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll     2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll           2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll           2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-select.ll        2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll         4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fast-isel.ll               2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-xaluo.ll                   4
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll    4
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll      4
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-call-return.ll         2
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-mul.ll                10
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-shift.ll               2
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-sqrt.ll                4
-rw-r--r--  llvm/test/CodeGen/AArch64/frameaddr.ll                     4
28 files changed, 184 insertions, 180 deletions
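
The source change below follows one recurring pattern: result registers are created from an explicitly chosen register class (e.g. GPR64sp where an ADDXri destination may legitimately be the stack pointer), and source operands are constrained to the class the instruction description demands, either directly via constrainOperandRegClass or by switching raw BuildMI calls over to the FastEmitInst_* helpers. A minimal sketch of that pattern, assuming the standard FastISel member context (FuncInfo, TII, DbgLoc); emitAddExample is an illustrative name, not a function from this patch:

// Illustrative sketch only -- emitAddExample is a made-up name; the
// member context (FuncInfo, TII, DbgLoc) is the usual FastISel one.
unsigned AArch64FastISel::emitAddExample(unsigned LHSReg, unsigned RHSReg) {
  const MCInstrDesc &II = TII.get(AArch64::ADDXrr);
  // Operand 0 is the def, so the first source operand lives at index
  // II.getNumDefs(); constrain both sources to the class II expects.
  LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
  RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
  // Pick the result class explicitly rather than TLI.getRegClassFor(VT),
  // which can return a broader class than the instruction accepts.
  unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(LHSReg)
      .addReg(RHSReg);
  return ResultReg;
}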
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 79642a13d48..aab2b4b01f2 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -247,7 +247,7 @@ unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(SI->second)
@@ -271,9 +271,8 @@ unsigned AArch64FastISel::AArch64MaterializeInt(const ConstantInt *CI, MVT VT) {
: &AArch64::GPR32RegClass;
unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(ZeroReg, getKillRegState(true));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(ZeroReg, getKillRegState(true));
return ResultReg;
}
@@ -686,7 +685,7 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
// the alloca address into a register, set the base type back to register and
// continue. This should almost never happen.
if (ImmediateOffsetNeedsLowering && Addr.isFIBase()) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(Addr.getFI())
@@ -698,14 +697,12 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
if (RegisterOffsetNeedsLowering) {
unsigned ResultReg = 0;
- if (Addr.getReg()) {
- ResultReg = createResultReg(&AArch64::GPR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(AArch64::ADDXrs), ResultReg)
- .addReg(Addr.getReg())
- .addReg(Addr.getOffsetReg())
- .addImm(Addr.getShift());
- } else
+ if (Addr.getReg())
+ ResultReg = FastEmitInst_rri(AArch64::ADDXrs, &AArch64::GPR64RegClass,
+ Addr.getReg(), /*TODO:IsKill=*/false,
+ Addr.getOffsetReg(), /*TODO:IsKill=*/false,
+ Addr.getShift());
+ else
ResultReg = Emit_LSL_ri(MVT::i64, Addr.getOffsetReg(),
/*Op0IsKill=*/false, Addr.getShift());
if (!ResultReg)
@@ -752,6 +749,12 @@ void AArch64FastISel::AddLoadStoreOperands(Address &Addr,
MIB.addFrameIndex(FI).addImm(Offset);
} else {
assert(Addr.isRegBase() && "Unexpected address kind.");
+ const MCInstrDesc &II = MIB->getDesc();
+ unsigned Idx = (Flags & MachineMemOperand::MOStore) ? 1 : 0;
+ Addr.setReg(
+ constrainOperandRegClass(II, Addr.getReg(), II.getNumDefs()+Idx));
+ Addr.setOffsetReg(
+ constrainOperandRegClass(II, Addr.getOffsetReg(), II.getNumDefs()+Idx+1));
if (Addr.getOffsetReg()) {
assert(Addr.getOffset() == 0 && "Unexpected offset");
bool IsSigned = Addr.getExtendType() == AArch64_AM::SXTW ||
@@ -900,12 +903,17 @@ unsigned AArch64FastISel::emitAddsSubs_rr(bool UseAdds, MVT RetVT,
};
unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
unsigned ResultReg;
- if (WantResult)
- ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- else
+ if (WantResult) {
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ ResultReg = createResultReg(RC);
+ } else
ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ const MCInstrDesc &II = TII.get(Opc);
+ LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
+ RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill));
@@ -935,12 +943,16 @@ unsigned AArch64FastISel::emitAddsSubs_ri(bool UseAdds, MVT RetVT,
};
unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
unsigned ResultReg;
- if (WantResult)
- ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- else
+ if (WantResult) {
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ ResultReg = createResultReg(RC);
+ } else
ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ const MCInstrDesc &II = TII.get(Opc);
+ LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addImm(Imm)
.addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
@@ -964,12 +976,17 @@ unsigned AArch64FastISel::emitAddsSubs_rs(bool UseAdds, MVT RetVT,
};
unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
unsigned ResultReg;
- if (WantResult)
- ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- else
+ if (WantResult) {
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ ResultReg = createResultReg(RC);
+ } else
ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ const MCInstrDesc &II = TII.get(Opc);
+ LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
+ RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill))
.addImm(getShifterImm(ShiftType, ShiftImm));
@@ -993,12 +1010,17 @@ unsigned AArch64FastISel::emitAddsSubs_rx(bool UseAdds, MVT RetVT,
};
unsigned Opc = OpcTable[!UseAdds][(RetVT == MVT::i64)];
unsigned ResultReg;
- if (WantResult)
- ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- else
+ if (WantResult) {
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ ResultReg = createResultReg(RC);
+ } else
ResultReg = (RetVT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ const MCInstrDesc &II = TII.get(Opc);
+ LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
+ RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill))
.addReg(RHSReg, getKillRegState(RHSIsKill))
.addImm(getArithExtendImm(ExtType, ShiftImm));
@@ -1290,9 +1312,10 @@ bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr,
SrcReg = ANDReg;
}
// Create the base instruction, then add the operands.
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(Opc))
- .addReg(SrcReg);
+ const MCInstrDesc &II = TII.get(Opc);
+ SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(SrcReg);
AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, ScaleFactor, MMO);
return true;
@@ -1517,8 +1540,9 @@ bool AArch64FastISel::SelectIndirectBr(const Instruction *I) {
return false;
// Emit the indirect branch.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BR))
- .addReg(AddrReg);
+ const MCInstrDesc &II = TII.get(AArch64::BR);
+ AddrReg = constrainOperandRegClass(II, AddrReg, II.getNumDefs());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);
// Make sure the CFG is up-to-date.
for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i)
@@ -1565,12 +1589,17 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
return false;
unsigned SelectOpc;
+ const TargetRegisterClass *RC = nullptr;
switch (DestVT.SimpleTy) {
default: return false;
- case MVT::i32: SelectOpc = AArch64::CSELWr; break;
- case MVT::i64: SelectOpc = AArch64::CSELXr; break;
- case MVT::f32: SelectOpc = AArch64::FCSELSrrr; break;
- case MVT::f64: SelectOpc = AArch64::FCSELDrrr; break;
+ case MVT::i32:
+ SelectOpc = AArch64::CSELWr; RC = &AArch64::GPR32RegClass; break;
+ case MVT::i64:
+ SelectOpc = AArch64::CSELXr; RC = &AArch64::GPR64RegClass; break;
+ case MVT::f32:
+ SelectOpc = AArch64::FCSELSrrr; RC = &AArch64::FPR32RegClass; break;
+ case MVT::f64:
+ SelectOpc = AArch64::FCSELDrrr; RC = &AArch64::FPR64RegClass; break;
}
const Value *Cond = SI->getCondition();
@@ -1599,13 +1628,8 @@ bool AArch64FastISel::SelectSelect(const Instruction *I) {
if (!TrueReg || !FalseReg)
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc),
- ResultReg)
- .addReg(TrueReg, getKillRegState(TrueIsKill))
- .addReg(FalseReg, getKillRegState(FalseIsKill))
- .addImm(CC);
-
+ unsigned ResultReg = FastEmitInst_rri(SelectOpc, RC, TrueReg, TrueIsKill,
+ FalseReg, FalseIsKill, CC);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1684,8 +1708,9 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
"Unexpected value type.");
unsigned SrcReg = getRegForValue(I->getOperand(0));
- if (SrcReg == 0)
+ if (!SrcReg)
return false;
+ bool SrcIsKill = hasTrivialKill(I->getOperand(0));
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true);
@@ -1693,13 +1718,11 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {
SrcReg =
EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
- if (SrcReg == 0)
+ if (!SrcReg)
return false;
+ SrcIsKill = true;
}
- MRI.constrainRegClass(SrcReg, SrcVT == MVT::i64 ? &AArch64::GPR64RegClass
- : &AArch64::GPR32RegClass);
-
unsigned Opc;
if (SrcVT == MVT::i64) {
if (Signed)
@@ -1713,9 +1736,8 @@ bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) {
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
- .addReg(SrcReg);
+ unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
+ SrcIsKill);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1790,16 +1812,22 @@ bool AArch64FastISel::FastLowerArguments() {
for (auto const &Arg : F->args()) {
MVT VT = TLI.getSimpleValueType(Arg.getType());
unsigned SrcReg;
+ const TargetRegisterClass *RC = nullptr;
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type.");
case MVT::i1:
case MVT::i8:
case MVT::i16: VT = MVT::i32; // fall-through
- case MVT::i32: SrcReg = Registers[0][GPRIdx++]; break;
- case MVT::i64: SrcReg = Registers[1][GPRIdx++]; break;
- case MVT::f16: SrcReg = Registers[2][FPRIdx++]; break;
- case MVT::f32: SrcReg = Registers[3][FPRIdx++]; break;
- case MVT::f64: SrcReg = Registers[4][FPRIdx++]; break;
+ case MVT::i32:
+ SrcReg = Registers[0][GPRIdx++]; RC = &AArch64::GPR32RegClass; break;
+ case MVT::i64:
+ SrcReg = Registers[1][GPRIdx++]; RC = &AArch64::GPR64RegClass; break;
+ case MVT::f16:
+ SrcReg = Registers[2][FPRIdx++]; RC = &AArch64::FPR16RegClass; break;
+ case MVT::f32:
+ SrcReg = Registers[3][FPRIdx++]; RC = &AArch64::FPR32RegClass; break;
+ case MVT::f64:
+ SrcReg = Registers[4][FPRIdx++]; RC = &AArch64::FPR64RegClass; break;
}
// Skip unused arguments.
@@ -1808,7 +1836,6 @@ bool AArch64FastISel::FastLowerArguments() {
continue;
}
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
@@ -1816,7 +1843,7 @@ bool AArch64FastISel::FastLowerArguments() {
unsigned ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(DstReg, getKillRegState(true));
+ .addReg(DstReg, getKillRegState(true));
UpdateValueMap(&Arg, ResultReg);
}
return true;
@@ -1937,7 +1964,7 @@ bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(RVLocs[0].getLocReg());
+ .addReg(RVLocs[0].getLocReg());
CLI.InRegs.push_back(RVLocs[0].getLocReg());
CLI.ResultReg = ResultReg;
@@ -2192,8 +2219,9 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
static_cast<const AArch64RegisterInfo *>(
TM.getSubtargetImpl()->getRegisterInfo());
unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
- unsigned SrcReg = FramePtr;
-
+ unsigned SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), SrcReg).addReg(FramePtr);
// Recursively load frame address
// ldr x0, [fp]
// ldr x0, [x0]
@@ -2202,10 +2230,9 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
unsigned DestReg;
unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
while (Depth--) {
- DestReg = createResultReg(&AArch64::GPR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(AArch64::LDRXui), DestReg)
- .addReg(SrcReg).addImm(0);
+ DestReg = FastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
+ SrcReg, /*IsKill=*/true, 0);
+ assert(DestReg && "Unexpected LDR instruction emission failure.");
SrcReg = DestReg;
}
@@ -2298,7 +2325,6 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
const Function *Callee = II->getCalledFunction();
auto *Ty = cast<StructType>(Callee->getReturnType());
Type *RetTy = Ty->getTypeAtIndex(0U);
- Type *CondTy = Ty->getTypeAtIndex(1);
MVT VT;
if (!isTypeLegal(RetTy, VT))
@@ -2394,15 +2420,11 @@ bool AArch64FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
}
- ResultReg2 = FuncInfo.CreateRegs(CondTy);
+ ResultReg2 = FastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
+ AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
+ /*IsKill=*/true, getInvertedCondCode(CC));
assert((ResultReg1 + 1) == ResultReg2 &&
"Nonconsecutive result registers.");
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
- ResultReg2)
- .addReg(AArch64::WZR, getKillRegState(true))
- .addReg(AArch64::WZR, getKillRegState(true))
- .addImm(getInvertedCondCode(CC));
-
UpdateValueMap(II, ResultReg1, 2);
return true;
}
@@ -2586,13 +2608,8 @@ unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) {
// FIXME: We're SExt i1 to i64.
return 0;
}
- unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SBFMWri),
- ResultReg)
- .addReg(SrcReg)
- .addImm(0)
- .addImm(0);
- return ResultReg;
+ return FastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
+ /*TODO:IsKill=*/false, 0, 0);
}
}
@@ -2610,14 +2627,10 @@ unsigned AArch64FastISel::Emit_MUL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
}
- // Create the base instruction, then add the operands.
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
- .addReg(Op0, getKillRegState(Op0IsKill))
- .addReg(Op1, getKillRegState(Op1IsKill))
- .addReg(ZReg, getKillRegState(true));
-
- return ResultReg;
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ return FastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill,
+ ZReg, /*IsKill=*/true);
}
unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
@@ -2625,15 +2638,9 @@ unsigned AArch64FastISel::Emit_SMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
if (RetVT != MVT::i64)
return 0;
- // Create the base instruction, then add the operands.
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SMADDLrrr),
- ResultReg)
- .addReg(Op0, getKillRegState(Op0IsKill))
- .addReg(Op1, getKillRegState(Op1IsKill))
- .addReg(AArch64::XZR, getKillRegState(true));
-
- return ResultReg;
+ return FastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
+ Op0, Op0IsKill, Op1, Op1IsKill,
+ AArch64::XZR, /*IsKill=*/true);
}
unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
@@ -2641,15 +2648,9 @@ unsigned AArch64FastISel::Emit_UMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
if (RetVT != MVT::i64)
return 0;
- // Create the base instruction, then add the operands.
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::UMADDLrrr),
- ResultReg)
- .addReg(Op0, getKillRegState(Op0IsKill))
- .addReg(Op1, getKillRegState(Op1IsKill))
- .addReg(AArch64::XZR, getKillRegState(true));
-
- return ResultReg;
+ return FastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
+ Op0, Op0IsKill, Op1, Op1IsKill,
+ AArch64::XZR, /*IsKill=*/true);
}
unsigned AArch64FastISel::Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
@@ -2667,9 +2668,9 @@ unsigned AArch64FastISel::Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
Opc = AArch64::UBFMXri; ImmR = -Shift % 64; ImmS = 63 - Shift; break;
}
- RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
- return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, ImmR,
- ImmS);
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS);
}
unsigned AArch64FastISel::Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
@@ -2683,9 +2684,9 @@ unsigned AArch64FastISel::Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
case MVT::i64: Opc = AArch64::UBFMXri; ImmS = 63; break;
}
- RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
- return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
- ImmS);
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, Shift, ImmS);
}
unsigned AArch64FastISel::Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
@@ -2699,9 +2700,9 @@ unsigned AArch64FastISel::Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
case MVT::i64: Opc = AArch64::SBFMXri; ImmS = 63; break;
}
- RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
- return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill, Shift,
- ImmS);
+ const TargetRegisterClass *RC =
+ (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ return FastEmitInst_rii(Opc, RC, Op0, Op0IsKill, Shift, ImmS);
}
unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
@@ -2760,13 +2761,9 @@ unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
SrcReg = Src64;
}
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
- .addReg(SrcReg)
- .addImm(0)
- .addImm(Imm);
-
- return ResultReg;
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ return FastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm);
}
bool AArch64FastISel::SelectIntExt(const Instruction *I) {
@@ -2843,22 +2840,23 @@ bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) {
unsigned Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
+ bool Src0IsKill = hasTrivialKill(I->getOperand(0));
unsigned Src1Reg = getRegForValue(I->getOperand(1));
if (!Src1Reg)
return false;
+ bool Src1IsKill = hasTrivialKill(I->getOperand(1));
- unsigned QuotReg = createResultReg(TLI.getRegClassFor(DestVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(DivOpc), QuotReg)
- .addReg(Src0Reg)
- .addReg(Src1Reg);
+ const TargetRegisterClass *RC =
+ (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
+ unsigned QuotReg = FastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false,
+ Src1Reg, /*IsKill=*/false);
+ assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MSubOpc), ResultReg)
- .addReg(QuotReg)
- .addReg(Src1Reg)
- .addReg(Src0Reg);
+ unsigned ResultReg = FastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true,
+ Src1Reg, Src1IsKill, Src0Reg,
+ Src0IsKill);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -2948,12 +2946,19 @@ bool AArch64FastISel::SelectBitCast(const Instruction *I) {
else
return false;
+ const TargetRegisterClass *RC = nullptr;
+ switch (RetVT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type.");
+ case MVT::i32: RC = &AArch64::GPR32RegClass; break;
+ case MVT::i64: RC = &AArch64::GPR64RegClass; break;
+ case MVT::f32: RC = &AArch64::FPR32RegClass; break;
+ case MVT::f64: RC = &AArch64::FPR64RegClass; break;
+ }
unsigned Op0Reg = getRegForValue(I->getOperand(0));
if (!Op0Reg)
return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
- unsigned ResultReg = FastEmitInst_r(Opc, TLI.getRegClassFor(RetVT),
- Op0Reg, Op0IsKill);
+ unsigned ResultReg = FastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill);
if (!ResultReg)
return false;
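
The test updates that follow mostly thread -verify-machineinstrs through the llc RUN lines so the machine verifier checks the FastISel output, and relax checks that hard-coded result registers (e.g. mul w0, w0, w1) now that results land in fresh virtual registers. Many conversions above lean on the target-independent FastEmitInst_* helpers, which perform the same operand constraining internally. Roughly what FastEmitInst_rri does, paraphrased and simplified from the generic FastISel of this era (error paths and the zero-def fallback omitted):

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  // Create the result register from RC, then constrain each source
  // operand to the class the instruction description requires -- the
  // same fix the hand-written BuildMI call sites above apply manually.
  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, getKillRegState(Op0IsKill))
      .addReg(Op1, getKillRegState(Op1IsKill))
      .addImm(Imm);
  return ResultReg;
}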
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
index 168e921bcc0..7d880f300bb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=arm64 -O0 < %s | FileCheck %s
-; RUN: llc -march=arm64 -O3 < %s | FileCheck %s
+; RUN: llc -march=arm64 -O0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=arm64 -O3 -verify-machineinstrs < %s | FileCheck %s
@.str = private unnamed_addr constant [9 x i8] c"%lf %lu\0A\00", align 1
@.str1 = private unnamed_addr constant [8 x i8] c"%lf %u\0A\00", align 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll b/llvm/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
index 1b2d54317c2..1bb47fc00b2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-EXT-undef-mask.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -march=arm64 -aarch64-neon-syntax=apple | FileCheck %s
+; RUN: llc -O0 -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs < %s | FileCheck %s
; The following 2 test cases test shufflevector with beginning UNDEF mask.
define <8 x i16> @test_vext_undef_traverse(<8 x i16> %in) {
diff --git a/llvm/test/CodeGen/AArch64/arm64-abi.ll b/llvm/test/CodeGen/AArch64/arm64-abi.ll
index b48e3574756..389f10f3b12 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi.ll
@@ -1,7 +1,6 @@
-; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
-; RUN: llc < %s -O0 | FileCheck -check-prefix=FAST %s
+; RUN: llc -mtriple=arm64-apple-darwin -mcpu=cyclone -enable-misched=false < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=arm64-apple-darwin < %s | FileCheck --check-prefix=FAST %s
; REQUIRES: asserts
-target triple = "arm64-apple-darwin"
; rdar://9932559
define i64 @i8i16callee(i64 %a1, i64 %a2, i64 %a3, i8 signext %a4, i16 signext %a5, i64 %a6, i64 %a7, i64 %a8, i8 signext %b1, i16 signext %b2, i8 signext %b3, i8 signext %b4) nounwind readnone noinline {
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
index ebd847e0f72..d81bc7cee11 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
@sortlist = common global [5001 x i32] zeroinitializer, align 16
@sortlist2 = common global [5001 x i64] zeroinitializer, align 16
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
index 1706e9eba2b..34394b2af0a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
@@ -1,5 +1,5 @@
; This test should cause the TargetMaterializeAlloca to be invoked
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
%struct.S1Ty = type { i64 }
%struct.S2Ty = type { %struct.S1Ty, %struct.S1Ty }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
index 7885a6251b8..09e449c7433 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -mcpu=cyclone | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -mcpu=cyclone -verify-machineinstrs < %s | FileCheck %s
define void @branch1() nounwind uwtable ssp {
%x = alloca i32, align 4
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
index 34a227a9dc0..6e22fcf7dc7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
@@ -1,6 +1,6 @@
-; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=small -mtriple=arm64-apple-darwin < %s | FileCheck %s
-; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=large -mtriple=arm64-apple-darwin < %s | FileCheck %s --check-prefix=LARGE
-; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=small -mtriple=aarch64_be-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-BE
+; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=small -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=large -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s --check-prefix=LARGE
+; RUN: llc -O0 -fast-isel-abort -fast-isel-abort-args -code-model=small -verify-machineinstrs -mtriple=aarch64_be-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-BE
define void @call0() nounwind {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
index ca8ab2cb504..a6c7bef3725 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -mcpu=cyclone | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin -mcpu=cyclone < %s | FileCheck %s
;; Test various conversions.
define zeroext i32 @trunc_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp {
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
index f0305962076..de101f2665e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-fcmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
define zeroext i1 @fcmp_float1(float %a) nounwind ssp {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
index dc4d8953c27..78a4717c9a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
; Test load/store of global value from global offset table.
@seed = common global i64 0, align 8
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
index e0d7d413166..2d60783cd1c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -fast-isel-abort -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
define i32 @icmp_eq_imm(i32 %a) nounwind ssp {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
index 70335ace50c..a5f45249678 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
@fn.table = internal global [2 x i8*] [i8* blockaddress(@fn, %ZERO), i8* blockaddress(@fn, %ONE)], align 8
define i32 @fn(i32 %target) nounwind {
entry:
-; CHECK: @fn
+; CHECK-LABEL: fn
%retval = alloca i32, align 4
%target.addr = alloca i32, align 4
store i32 %target, i32* %target.addr, align 4
@@ -29,8 +29,8 @@ return: ; preds = %ONE, %ZERO
ret i32 %2
indirectgoto: ; preds = %entry
-; CHECK: ldr x0, [sp]
-; CHECK: br x0
+; CHECK: ldr [[REG:x[0-9]+]], [sp]
+; CHECK-NEXT: br [[REG]]
%indirect.goto.dest = phi i8* [ %1, %entry ]
indirectbr i8* %indirect.goto.dest, [label %ZERO, label %ONE]
}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
index b16c899f421..9ac3e443183 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=arm64-apple-ios | FileCheck %s --check-prefix=ARM64
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=arm64-apple-ios < %s | FileCheck %s --check-prefix=ARM64
@message = global [80 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 16
@temp = common global [80 x i8] zeroinitializer, align 16
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
index 483d1799f9c..81daa7c1d5a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-noconvert.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm64-apple-ios -O0 %s -o - | FileCheck %s
+; RUN: llc -O0 -verify-machineinstrs -mtriple=aarch64-apple-ios < %s | FileCheck %s
; Fast-isel can't do vector conversions yet, but it was emitting some highly
; suspect UCVTFUWDri MachineInstrs.
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
index d5bdbaae9e7..5781a602101 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
; RUN: llc %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -print-machineinstrs=expand-isel-pseudos -o /dev/null 2> %t
; RUN: FileCheck %s < %t --check-prefix=CHECK-SSA
; REQUIRES: asserts
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
index d91fd285d55..f84c75504f6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
;; Test returns.
define void @t0() nounwind ssp {
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-select.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-select.ll
index c8a997e9a81..1bd4d05454f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-select.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-select.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -mtriple=arm64-apple-darwin -verify-machineinstrs < %s | FileCheck %s
define i32 @t1(i32 %c) nounwind readnone {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
index 362224fd0ca..9494d555301 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-unknown-unknown -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-unknown -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-unknown -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
define void @store_i8(i8* %a) {
; CHECK-LABEL: store_i8
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
index a71bdb2a3df..d3b7e67a315 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -mtriple=arm64-apple-darwin | FileCheck %s
+; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
define void @t0(i32 %a) nounwind {
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index 71300c4dd2c..d23eb877b99 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
-; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=arm64 -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
;
; Get the actual value of the overflow bit.
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
index 11b95f92bf2..b950a24f558 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=aarch64-apple-darwin < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SDAG
-; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort < %s | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+; RUN: llc -mtriple=aarch64-apple-darwin -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SDAG
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
; Load / Store Base Register only
define zeroext i1 @load_breg_i1(i1* %a) {
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll
index f96fff18d03..5b224760460 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch_weights.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin -aarch64-atomic-cfg-tidy=0 -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
; Test if the BBs are reordred according to their branch weights.
define i64 @branch_weights_test(i64 %a, i64 %b) {
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll b/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
index 22cb35d99ab..9b10969417d 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-linux-gnu"
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-mul.ll b/llvm/test/CodeGen/AArch64/fast-isel-mul.ll
index d02c67f52f8..88a3ee8db3d 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-mul.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-mul.ll
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel -fast-isel-abort -mtriple=aarch64 -o - %s | FileCheck %s
+; RUN: llc -fast-isel -fast-isel-abort -verify-machineinstrs -mtriple=aarch64 < %s | FileCheck %s
@var8 = global i8 0
@var16 = global i16 0
@@ -7,7 +7,7 @@
define void @test_mul8(i8 %lhs, i8 %rhs) {
; CHECK-LABEL: test_mul8:
-; CHECK: mul w0, w0, w1
+; CHECK: mul {{w[0-9]+}}, w0, w1
; %lhs = load i8* @var8
; %rhs = load i8* @var8
%prod = mul i8 %lhs, %rhs
@@ -17,7 +17,7 @@ define void @test_mul8(i8 %lhs, i8 %rhs) {
define void @test_mul16(i16 %lhs, i16 %rhs) {
; CHECK-LABEL: test_mul16:
-; CHECK: mul w0, w0, w1
+; CHECK: mul {{w[0-9]+}}, w0, w1
%prod = mul i16 %lhs, %rhs
store i16 %prod, i16* @var16
ret void
@@ -25,7 +25,7 @@ define void @test_mul16(i16 %lhs, i16 %rhs) {
define void @test_mul32(i32 %lhs, i32 %rhs) {
; CHECK-LABEL: test_mul32:
-; CHECK: mul w0, w0, w1
+; CHECK: mul {{w[0-9]+}}, w0, w1
%prod = mul i32 %lhs, %rhs
store i32 %prod, i32* @var32
ret void
@@ -33,7 +33,7 @@ define void @test_mul32(i32 %lhs, i32 %rhs) {
define void @test_mul64(i64 %lhs, i64 %rhs) {
; CHECK-LABEL: test_mul64:
-; CHECK: mul x0, x0, x1
+; CHECK: mul {{x[0-9]+}}, x0, x1
%prod = mul i64 %lhs, %rhs
store i64 %prod, i64* @var64
ret void
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-shift.ll b/llvm/test/CodeGen/AArch64/fast-isel-shift.ll
index 8f670b4ba20..9b71930ea06 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-shift.ll
@@ -1,4 +1,4 @@
-; RUN: llc -fast-isel -fast-isel-abort -mtriple=arm64-apple-darwin < %s | FileCheck %s
+; RUN: llc -fast-isel -fast-isel-abort -mtriple=arm64-apple-darwin -verify-machineinstrs < %s | FileCheck %s
; CHECK-LABEL: lsl_i8
; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-sqrt.ll b/llvm/test/CodeGen/AArch64/fast-isel-sqrt.ll
index 44ad8eab369..1331d5c7de5 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-sqrt.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-sqrt.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-apple-darwin -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=arm64-apple-darwin -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
define float @test_sqrt_f32(float %a) {
; CHECK-LABEL: test_sqrt_f32
diff --git a/llvm/test/CodeGen/AArch64/frameaddr.ll b/llvm/test/CodeGen/AArch64/frameaddr.ll
index ff9916c156b..d6bb50e57a7 100644
--- a/llvm/test/CodeGen/AArch64/frameaddr.ll
+++ b/llvm/test/CodeGen/AArch64/frameaddr.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=arm64-apple-ios7.0 < %s | FileCheck %s
-; RUN: llc -mtriple=arm64-apple-ios7.0 -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-apple-darwin -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
define i8* @test_frameaddress0() nounwind {
entry: