author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-06-24 16:16:12 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2019-06-24 16:16:12 +0000
commit    faeaedf8e938696497021adcd5925e5741c72f62 (patch)
tree      66d3c65d42cfac2b080183c6c79562d0c205b23f /llvm/lib/Target/AArch64/AArch64CallLowering.cpp
parent    906d494b6e7eb0d8bde19bec2de7d93a9516ebe3 (diff)
GlobalISel: Remove unsigned variant of SrcOp
Force using Register. One downside is the generated register enums require
explicit conversion.

llvm-svn: 364194
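The explicit conversions the message refers to show up in the diff below as Register(AArch64::SP) and Register(AArch64::X21): with the unsigned SrcOp constructor gone, a bare register enum would need two user-defined conversions (unsigned -> Register -> SrcOp), which C++ never applies implicitly. The following is a minimal sketch of that situation, using stand-in names (Reg, Src, buildCopyLike) rather than the real LLVM classes:

// Minimal sketch, not the real LLVM headers: Reg stands in for llvm::Register,
// Src for llvm::SrcOp, and buildCopyLike for MachineIRBuilder::buildCopy.
namespace sketch {

enum PhysReg : unsigned { SP = 1, X21 = 2 };  // stands in for AArch64::SP etc.

class Reg {
  unsigned Val;
public:
  Reg(unsigned V) : Val(V) {}   // implicit: one user-defined conversion
  unsigned value() const { return Val; }
};

class Src {
  Reg R;
public:
  Src(Reg R) : R(R) {}          // implicit: a second user-defined conversion
  // Before this commit there was also a Src(unsigned) constructor, so a bare
  // enum value reached Src in a single user-defined step.
  unsigned value() const { return R.value(); }
};

unsigned buildCopyLike(Src S) { return S.value(); }

} // namespace sketch

int main() {
  using namespace sketch;
  // buildCopyLike(SP);                 // error: enum -> Reg -> Src would be
                                        // two user-defined conversions
  return buildCopyLike(Reg(SP)) == SP ? 0 : 1;  // explicit wrap, as in the diff
}

Uncommenting the buildCopyLike(SP) call reproduces the kind of overload-resolution error that the explicit Register(...) wraps in the diff avoid.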
Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64CallLowering.cpp')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 1e1c20d5f30..a497bd9b339 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -57,18 +57,18 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
- unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
+ Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
MIRBuilder.buildFrameIndex(AddrReg, FI);
StackUsed = std::max(StackUsed, Size + Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);
switch (VA.getLocInfo()) {
@@ -85,7 +85,7 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
}
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
// FIXME: Get alignment
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
@@ -133,31 +133,31 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
AssignFnVarArg(AssignFnVarArg), StackSize(0) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, 64);
LLT s64 = LLT::scalar(64);
- unsigned SPReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildCopy(SPReg, AArch64::SP);
+ Register SPReg = MRI.createGenericVirtualRegister(p0);
+ MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));
- unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
+ Register OffsetReg = MRI.createGenericVirtualRegister(s64);
MIRBuilder.buildConstant(OffsetReg, Offset);
- unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
Size = VA.getLocVT().getSizeInBits() / 8;
@@ -263,7 +263,7 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return false;
}
- unsigned CurVReg = VRegs[i];
+ Register CurVReg = VRegs[i];
ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
@@ -367,7 +367,7 @@ bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
bool Split = false;
LLT Ty = MRI.getType(VRegs[i]);
- unsigned Dst = VRegs[i];
+ Register Dst = VRegs[i];
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
[&](unsigned Reg, uint64_t Offset) {
@@ -436,7 +436,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 8> SplitArgs;
for (auto &OrigArg : OrigArgs) {
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
- [&](unsigned Reg, uint64_t Offset) {
+ [&](Register Reg, uint64_t Offset) {
MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
});
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
@@ -512,7 +512,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
if (SwiftErrorVReg) {
MIB.addDef(AArch64::X21, RegState::Implicit);
- MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21);
+ MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21));
}
CallSeqStart.addImm(Handler.StackSize).addImm(0);