Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CallLowering.cpp            |  6
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp        |  5
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp   |  3
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp        |  3
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp        |  3
-rw-r--r--  llvm/lib/CodeGen/MachineVerifier.cpp                    |  3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp         | 69
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.h           |  8
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp  |  7
9 files changed, 61 insertions, 46 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 9d1c1e7402d..4ce643de52e 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -25,9 +25,9 @@ bool CallLowering::lowerCall(
// First step is to marshall all the function's parameters into the correct
// physregs and memory locations. Gather the sequence of argument types that
// we'll pass to the assigner function.
- SmallVector<MVT, 8> ArgTys;
+ SmallVector<Type *, 8> ArgTys;
for (auto &Arg : CI.arg_operands())
- ArgTys.push_back(MVT::getVT(Arg->getType()));
+ ArgTys.push_back(Arg->getType());
MachineOperand Callee = MachineOperand::CreateImm(0);
if (Function *F = CI.getCalledFunction())
@@ -35,6 +35,6 @@ bool CallLowering::lowerCall(
else
Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
- return lowerCall(MIRBuilder, Callee, MVT::getVT(CI.getType()),
+ return lowerCall(MIRBuilder, Callee, CI.getType(),
ResReg ? ResReg : ArrayRef<unsigned>(), ArgTys, ArgRegs);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index df48c428cb1..da18b036f0c 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -168,6 +168,11 @@ MachineIRBuilder::buildUAdde(ArrayRef<LLT> Tys, unsigned Res, unsigned CarryOut,
.addUse(CarryIn);
}
+MachineInstrBuilder MachineIRBuilder::buildType(LLT Ty,
+ unsigned Res, unsigned Op) {
+ return buildInstr(TargetOpcode::G_TYPE, Ty).addDef(Res).addUse(Op);
+}
+
MachineInstrBuilder MachineIRBuilder::buildAnyExt(ArrayRef<LLT> Tys,
unsigned Res, unsigned Op) {
validateTruncExt(Tys, true);
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp b/llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp
index 9787227d284..18db91cbd28 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineLegalizeHelper.cpp
@@ -104,7 +104,8 @@ MachineLegalizeHelper::libcall(MachineInstr &MI) {
default:
return UnableToLegalize;
case TargetOpcode::G_FREM: {
- MVT Ty = MVT::getFloatingPointVT(MI.getType().getSizeInBits());
+ auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ Type *Ty = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
const char *Name =
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp b/llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp
index baef7657eac..cc3d4ecd7c6 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineLegalizer.cpp
@@ -30,6 +30,9 @@ MachineLegalizer::MachineLegalizer() : TablesInitialized(false) {
DefaultActions[TargetOpcode::G_ANYEXT] = Legal;
DefaultActions[TargetOpcode::G_TRUNC] = Legal;
+ // G_TYPE is essentially an annotated COPY so it's always legal.
+ DefaultActions[TargetOpcode::G_TYPE] = Legal;
+
DefaultActions[TargetOpcode::G_INTRINSIC] = Legal;
DefaultActions[TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS] = Legal;
diff --git a/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index 5e5541799c5..5a950ff17c1 100644
--- a/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -224,7 +224,8 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
bool CompleteMapping = true;
// For copies we want to walk over the operands and try to find one
// that has a register bank.
- bool isCopyLike = MI.isCopy() || MI.isPHI();
+ bool isCopyLike =
+ MI.isCopy() || MI.isPHI() || MI.getOpcode() == TargetOpcode::G_TYPE;
// Remember the register bank for reuse for copy-like instructions.
const RegisterBank *RegBank = nullptr;
// Remember the size of the register for reuse for copy-like instructions.
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 479c9e7eeae..0b8a85d9af7 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -903,7 +903,8 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
}
// Generic opcodes must not have physical register operands.
- if (isPreISelGenericOpcode(MCID.getOpcode())) {
+ if (isPreISelGenericOpcode(MCID.getOpcode()) &&
+ MCID.getOpcode() != TargetOpcode::G_TYPE) {
for (auto &Op : MI->operands()) {
if (Op.isReg() && TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
report("Generic instruction cannot have physical register", MI);
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 48ff3edfc35..91bd6f16492 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -44,19 +44,19 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
- handleAssignments(
- MIRBuilder, AssignFn, MVT::getVT(Val->getType()), VReg,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(PhysReg, ValReg);
- MIB.addUse(PhysReg, RegState::Implicit);
- });
+ handleAssignments(MIRBuilder, AssignFn, Val->getType(), VReg,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ MIB.addUse(PhysReg, RegState::Implicit);
+ });
}
return true;
}
bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
CCAssignFn *AssignFn,
- ArrayRef<MVT> ArgTypes,
+ ArrayRef<Type *> ArgTypes,
ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const {
MachineFunction &MF = MIRBuilder.getMF();
@@ -66,11 +66,10 @@ bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
unsigned NumArgs = ArgTypes.size();
- auto CurVT = ArgTypes.begin();
- for (unsigned i = 0; i != NumArgs; ++i, ++CurVT) {
- bool Res = AssignFn(i, *CurVT, *CurVT, CCValAssign::Full, ISD::ArgFlagsTy(),
- CCInfo);
- if (Res)
+ auto CurTy = ArgTypes.begin();
+ for (unsigned i = 0; i != NumArgs; ++i, ++CurTy) {
+ MVT CurVT = MVT::getVT(*CurTy);
+ if (AssignFn(i, CurVT, CurVT, CCValAssign::Full, ISD::ArgFlagsTy(), CCInfo))
return false;
}
assert(ArgLocs.size() == ArgTypes.size() &&
@@ -103,7 +102,7 @@ bool AArch64CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
// Everything checks out, tell the caller where we've decided this
// parameter/return value should go.
- AssignValToReg(MIRBuilder, ArgRegs[i], VA.getLocReg());
+ AssignValToReg(MIRBuilder, ArgTypes[i], ArgRegs[i], VA.getLocReg());
}
return true;
}
@@ -114,27 +113,27 @@ bool AArch64CallLowering::lowerFormalArguments(
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
- SmallVector<MVT, 8> ArgTys;
+ SmallVector<Type *, 8> ArgTys;
for (auto &Arg : Args)
- ArgTys.push_back(MVT::getVT(Arg.getType()));
+ ArgTys.push_back(Arg.getType());
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
CCAssignFn *AssignFn =
TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
- return handleAssignments(
- MIRBuilder, AssignFn, ArgTys, VRegs,
- [](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.getMBB().addLiveIn(PhysReg);
- MIRBuilder.buildCopy(ValReg, PhysReg);
- });
+ return handleAssignments(MIRBuilder, AssignFn, ArgTys, VRegs,
+ [](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.getMBB().addLiveIn(PhysReg);
+ MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ });
}
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const MachineOperand &Callee,
- ArrayRef<MVT> ResTys,
+ ArrayRef<Type *> ResTys,
ArrayRef<unsigned> ResRegs,
- ArrayRef<MVT> ArgTys,
+ ArrayRef<Type *> ArgTys,
ArrayRef<unsigned> ArgRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = *MF.getFunction();
@@ -147,12 +146,12 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
// And finally we can do the actual assignments. For a call we need to keep
// track of the registers used because they'll be implicit uses of the BL.
SmallVector<unsigned, 8> PhysRegs;
- handleAssignments(
- MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(PhysReg, ValReg);
- PhysRegs.push_back(PhysReg);
- });
+ handleAssignments(MIRBuilder, CallAssignFn, ArgTys, ArgRegs,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty, unsigned ValReg,
+ unsigned PhysReg) {
+ MIRBuilder.buildCopy(PhysReg, ValReg);
+ PhysRegs.push_back(PhysReg);
+ });
// Now we can build the actual call instruction.
auto MIB = MIRBuilder.buildInstr(Callee.isReg() ? AArch64::BLR : AArch64::BL);
@@ -170,12 +169,12 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
// implicit-define of the call instruction.
CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
if (!ResRegs.empty())
- handleAssignments(
- MIRBuilder, RetAssignFn, ResTys, ResRegs,
- [&](MachineIRBuilder &MIRBuilder, unsigned ValReg, unsigned PhysReg) {
- MIRBuilder.buildCopy(ValReg, PhysReg);
- MIB.addDef(PhysReg, RegState::Implicit);
- });
+ handleAssignments(MIRBuilder, RetAssignFn, ResTys, ResRegs,
+ [&](MachineIRBuilder &MIRBuilder, Type *Ty,
+ unsigned ValReg, unsigned PhysReg) {
+ MIRBuilder.buildType(LLT{*Ty}, ValReg, PhysReg);
+ MIB.addDef(PhysReg, RegState::Implicit);
+ });
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.h b/llvm/lib/Target/AArch64/AArch64CallLowering.h
index 19bbb150e99..1588ad44ad7 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.h
@@ -35,16 +35,16 @@ class AArch64CallLowering: public CallLowering {
ArrayRef<unsigned> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, const MachineOperand &Callee,
- ArrayRef<MVT> ResTys, ArrayRef<unsigned> ResRegs,
- ArrayRef<MVT> ArgTys,
+ ArrayRef<Type *> ResTys, ArrayRef<unsigned> ResRegs,
+ ArrayRef<Type *> ArgTys,
ArrayRef<unsigned> ArgRegs) const override;
private:
- typedef std::function<void(MachineIRBuilder &, unsigned, unsigned)>
+ typedef std::function<void(MachineIRBuilder &, Type *, unsigned, unsigned)>
AssignFnTy;
bool handleAssignments(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn,
- ArrayRef<MVT> ArgsTypes, ArrayRef<unsigned> ArgRegs,
+ ArrayRef<Type *> ArgsTypes, ArrayRef<unsigned> ArgRegs,
AssignFnTy AssignValToReg) const;
};
} // End of namespace llvm;
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index d0ad78eb302..779b624b0f7 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -229,6 +229,12 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return true;
}
+ case TargetOpcode::G_TYPE: {
+ I.setDesc(TII.get(TargetOpcode::COPY));
+ I.removeTypes();
+ return true;
+ }
+
case TargetOpcode::G_FRAME_INDEX: {
// allocas and G_FRAME_INDEX are only supported in addrspace(0).
if (I.getType() != LLT::pointer(0)) {
@@ -246,7 +252,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) const {
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE: {
LLT MemTy = I.getType(0);
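
A minimal sketch (not part of the commit above) of how the new G_TYPE annotation is meant to be used, based only on the hunks shown: call lowering now receives IR Types instead of MVTs, records the type of each incoming physreg copy via MachineIRBuilder::buildType, the legalizer and RegisterBankInfo treat G_TYPE as an ordinary copy, and the AArch64 selector folds it back into a plain COPY. The helper name annotateIncomingArg and the include paths are assumptions, not taken from the patch.

// Hypothetical helper mirroring the lowerFormalArguments callback above;
// assumes the GlobalISel headers present in this tree.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/Type.h"

static void annotateIncomingArg(llvm::MachineIRBuilder &MIRBuilder,
                                llvm::Type *Ty, unsigned ValReg,
                                unsigned PhysReg) {
  // Mark the physical argument register live-in to the current block.
  MIRBuilder.getMBB().addLiveIn(PhysReg);
  // G_TYPE acts as an annotated COPY: it carries the value's low-level type
  // so the legalizer and register-bank selection can see it, and the
  // instruction selector later rewrites it to a plain COPY.
  MIRBuilder.buildType(llvm::LLT{*Ty}, ValReg, PhysReg);
}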