Diffstat (limited to 'llvm/lib')
6 files changed, 234 insertions, 20 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64GenRegisterBankInfo.def b/llvm/lib/Target/AArch64/AArch64GenRegisterBankInfo.def
index fb5b8a01156..528756b3485 100644
--- a/llvm/lib/Target/AArch64/AArch64GenRegisterBankInfo.def
+++ b/llvm/lib/Target/AArch64/AArch64GenRegisterBankInfo.def
@@ -110,6 +110,10 @@ RegisterBankInfo::ValueMapping AArch64GenRegisterBankInfo::ValMappings[]{
   // 47: FPExt vector: 64 to 128. <-- This must match FPExt64To128Idx.
   {&AArch64GenRegisterBankInfo::PartMappings[PMI_FPR128 - PMI_Min], 1},
   {&AArch64GenRegisterBankInfo::PartMappings[PMI_FPR64 - PMI_Min], 1},
+  // 49: Shift scalar with 64 bit shift imm
+  {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR32 - PMI_Min], 1},
+  {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR32 - PMI_Min], 1},
+  {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR64 - PMI_Min], 1},
 };
 
 bool AArch64GenRegisterBankInfo::checkPartialMap(unsigned Idx,
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 0bad6bd9ec7..f03e0f39a0b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -59,6 +59,15 @@ private:
   /// the patterns that don't require complex C++.
   bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
 
+  // A lowering phase that runs before any selection attempts.
+
+  void preISelLower(MachineInstr &I) const;
+
+  // An early selection function that runs before the selectImpl() call.
+  bool earlySelect(MachineInstr &I) const;
+
+  bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
+
   bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;
   bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
@@ -136,6 +145,14 @@ private:
   MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
                                 MachineIRBuilder &MIRBuilder) const;
 
+  // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
+  // We use these manually instead of using the importer since it doesn't
+  // support SDNodeXForm.
+  ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
+  ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
+  ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
+  ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
+
   ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
 
   ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
@@ -1050,6 +1067,98 @@ void AArch64InstructionSelector::materializeLargeCMVal(
   return;
 }
 
+void AArch64InstructionSelector::preISelLower(MachineInstr &I) const {
+  MachineBasicBlock &MBB = *I.getParent();
+  MachineFunction &MF = *MBB.getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  switch (I.getOpcode()) {
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR: {
+    // These shifts are legalized to have 64 bit shift amounts because we
+    // want to take advantage of the existing imported selection patterns
+    // that assume the immediates are s64s. However, if the shifted type is
+    // 32 bits and for some reason we receive input GMIR that has an s64
+    // shift amount that's not a G_CONSTANT, insert a truncate so that we can
+    // still select the s32 register-register variant.
+    unsigned SrcReg = I.getOperand(1).getReg();
+    unsigned ShiftReg = I.getOperand(2).getReg();
+    const LLT ShiftTy = MRI.getType(ShiftReg);
+    const LLT SrcTy = MRI.getType(SrcReg);
+    if (SrcTy.isVector())
+      return;
+    assert(!ShiftTy.isVector() && "unexpected vector shift ty");
+    if (SrcTy.getSizeInBits() != 32 || ShiftTy.getSizeInBits() != 64)
+      return;
+    auto *AmtMI = MRI.getVRegDef(ShiftReg);
+    assert(AmtMI && "could not find a vreg definition for shift amount");
+    if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) {
+      // Insert a subregister copy to implement a 64->32 trunc.
+      MachineIRBuilder MIB(I);
+      auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
+                       .addReg(ShiftReg, 0, AArch64::sub_32);
+      MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
+      I.getOperand(2).setReg(Trunc.getReg(0));
+    }
+    return;
+  }
+  default:
+    return;
+  }
+}
+
+bool AArch64InstructionSelector::earlySelectSHL(
+    MachineInstr &I, MachineRegisterInfo &MRI) const {
+  // We try to match the immediate variant of LSL, which is actually an alias
+  // for a special case of UBFM. Otherwise, we fall back to the imported
+  // selector which will match the register variant.
+  assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
+  const auto &MO = I.getOperand(2);
+  auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI);
+  if (!VRegAndVal)
+    return false;
+
+  const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+  if (DstTy.isVector())
+    return false;
+  bool Is64Bit = DstTy.getSizeInBits() == 64;
+  auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
+  auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
+  MachineIRBuilder MIB(I);
+
+  if (!Imm1Fn || !Imm2Fn)
+    return false;
+
+  auto NewI =
+      MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri,
+                     {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()});
+
+  for (auto &RenderFn : *Imm1Fn)
+    RenderFn(NewI);
+  for (auto &RenderFn : *Imm2Fn)
+    RenderFn(NewI);
+
+  I.eraseFromParent();
+  return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
+}
+
+bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
+  assert(I.getParent() && "Instruction should be in a basic block!");
+  assert(I.getParent()->getParent() && "Instruction should be in a function!");
+
+  MachineBasicBlock &MBB = *I.getParent();
+  MachineFunction &MF = *MBB.getParent();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+
+  switch (I.getOpcode()) {
+  case TargetOpcode::G_SHL:
+    return earlySelectSHL(I, MRI);
+  default:
+    return false;
+  }
+}
+
 bool AArch64InstructionSelector::select(MachineInstr &I,
                                         CodeGenCoverage &CoverageInfo) const {
   assert(I.getParent() && "Instruction should be in a basic block!");
@@ -1107,6 +1216,19 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     return false;
   }
 
+  // Try to do some lowering before we start instruction selecting. These
+  // lowerings are purely transformations on the input GMIR, so selection
+  // must continue after any modification of the instruction.
+  preISelLower(I);
+
+  // There may be patterns that the importer can't handle optimally: it still
+  // selects them, but to a suboptimal sequence, so our custom C++ selection
+  // code later never gets a chance to work on them. Therefore, we have an
+  // early selection attempt here to give priority to certain selection
+  // routines over the imported ones.
+  if (earlySelect(I))
+    return true;
+
   if (selectImpl(I, CoverageInfo))
     return true;
 
@@ -3644,21 +3766,11 @@ bool AArch64InstructionSelector::selectIntrinsic(
   return false;
 }
 
-/// SelectArithImmed - Select an immediate value that can be represented as
-/// a 12-bit value shifted left by either 0 or 12. If so, return true with
-/// Val set to the 12-bit value and Shift set to the shifter operand.
-InstructionSelector::ComplexRendererFns
-AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
-  MachineInstr &MI = *Root.getParent();
-  MachineBasicBlock &MBB = *MI.getParent();
-  MachineFunction &MF = *MBB.getParent();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-
-  // This function is called from the addsub_shifted_imm ComplexPattern,
-  // which lists [imm] as the list of opcode it's interested in, however
-  // we still need to check whether the operand is actually an immediate
-  // here because the ComplexPattern opcode list is only used in
-  // root-level opcode matching.
+static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
+  auto &MI = *Root.getParent();
+  auto &MBB = *MI.getParent();
+  auto &MF = *MBB.getParent();
+  auto &MRI = MF.getRegInfo();
   uint64_t Immed;
   if (Root.isImm())
     Immed = Root.getImm();
@@ -3674,7 +3786,59 @@ AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
     Immed = Op1.getCImm()->getZExtValue();
   } else
     return None;
+  return Immed;
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftA_32(const MachineOperand &Root) const {
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None || *MaybeImmed > 31)
+    return None;
+  uint64_t Enc = (32 - *MaybeImmed) & 0x1f;
+  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftB_32(const MachineOperand &Root) const {
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None || *MaybeImmed > 31)
+    return None;
+  uint64_t Enc = 31 - *MaybeImmed;
+  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftA_64(const MachineOperand &Root) const {
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None || *MaybeImmed > 63)
+    return None;
+  uint64_t Enc = (64 - *MaybeImmed) & 0x3f;
+  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftB_64(const MachineOperand &Root) const {
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None || *MaybeImmed > 63)
+    return None;
+  uint64_t Enc = 63 - *MaybeImmed;
+  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+/// SelectArithImmed - Select an immediate value that can be represented as
+/// a 12-bit value shifted left by either 0 or 12. If so, return true with
+/// Val set to the 12-bit value and Shift set to the shifter operand.
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
+  // This function is called from the addsub_shifted_imm ComplexPattern,
+  // which lists [imm] as the list of opcodes it's interested in; however,
+  // we still need to check whether the operand is actually an immediate
+  // here because the ComplexPattern opcode list is only used in
+  // root-level opcode matching.
+  auto MaybeImmed = getImmedFromMO(Root);
+  if (MaybeImmed == None)
+    return None;
+  uint64_t Immed = *MaybeImmed;
   unsigned ShiftAmt;
 
   if (Immed >> 12 == 0) {
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index c3392ae32fd..26c396b4388 100644
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -109,7 +109,14 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
       .scalarize(0);
 
   getActionDefinitionsBuilder({G_LSHR, G_ASHR})
-      .legalFor({{s32, s32}, {s64, s64}, {v2s32, v2s32}, {v4s32, v4s32}})
+      .customIf([=](const LegalityQuery &Query) {
+        const auto &SrcTy = Query.Types[0];
+        const auto &AmtTy = Query.Types[1];
+        return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
+               AmtTy.getSizeInBits() == 32;
+      })
+      .legalFor(
+          {{s32, s32}, {s32, s64}, {s64, s64}, {v2s32, v2s32}, {v4s32, v4s32}})
       .clampScalar(1, s32, s64)
       .clampScalar(0, s32, s64)
       .minScalarSameAs(1, 0);
@@ -601,11 +608,39 @@ bool AArch64LegalizerInfo::legalizeCustom(MachineInstr &MI,
   case TargetOpcode::G_LOAD:
   case TargetOpcode::G_STORE:
     return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR:
+    return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
   }
 
   llvm_unreachable("expected switch to return");
 }
 
+bool AArch64LegalizerInfo::legalizeShlAshrLshr(
+    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
+    GISelChangeObserver &Observer) const {
+  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
+         MI.getOpcode() == TargetOpcode::G_LSHR ||
+         MI.getOpcode() == TargetOpcode::G_SHL);
+  // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
+  // imported patterns can select it later. Either way, it will be legal.
+  Register AmtReg = MI.getOperand(2).getReg();
+  auto *CstMI = MRI.getVRegDef(AmtReg);
+  assert(CstMI && "expected to find a vreg def");
+  if (CstMI->getOpcode() != TargetOpcode::G_CONSTANT)
+    return true;
+  // Check the shift amount is in range for an immediate form.
+  unsigned Amount = CstMI->getOperand(1).getCImm()->getZExtValue();
+  if (Amount > 31)
+    return true; // This will have to remain a register variant.
+  assert(MRI.getType(AmtReg).getSizeInBits() == 32);
+  MIRBuilder.setInstr(MI);
+  auto ExtCst = MIRBuilder.buildZExt(LLT::scalar(64), AmtReg);
+  MI.getOperand(2).setReg(ExtCst.getReg(0));
+  return true;
+}
+
 bool AArch64LegalizerInfo::legalizeLoadStore(
     MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
     GISelChangeObserver &Observer) const {
diff --git a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.h
index 699f916b316..f3362a18620 100644
--- a/llvm/lib/Target/AArch64/AArch64LegalizerInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64LegalizerInfo.h
@@ -37,6 +37,9 @@ private:
   bool legalizeLoadStore(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIRBuilder,
                          GISelChangeObserver &Observer) const;
+  bool legalizeShlAshrLshr(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           MachineIRBuilder &MIRBuilder,
+                           GISelChangeObserver &Observer) const;
 };
 } // End llvm namespace.
 #endif
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 7c57d618f1a..b52259cc9ac 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -537,10 +537,6 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case TargetOpcode::G_AND:
   case TargetOpcode::G_OR:
   case TargetOpcode::G_XOR:
-  // Shifts.
-  case TargetOpcode::G_SHL:
-  case TargetOpcode::G_LSHR:
-  case TargetOpcode::G_ASHR:
   // Floating point ops.
   case TargetOpcode::G_FADD:
   case TargetOpcode::G_FSUB:
@@ -555,6 +551,17 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
         getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
         /*NumOperands*/ 2);
   }
+  // Shifts.
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_ASHR: {
+    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
+    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
+    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
+      return getInstructionMapping(DefaultMappingID, 1,
+                                   &ValMappings[Shift64Imm], 3);
+    return getSameKindOfOperandsMapping(MI);
+  }
   case TargetOpcode::COPY: {
     unsigned DstReg = MI.getOperand(0).getReg();
     unsigned SrcReg = MI.getOperand(1).getReg();
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
index 31bd36e971d..016fed65eb2 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.h
@@ -57,6 +57,7 @@ protected:
     FPExt16To64Idx = 43,
     FPExt32To64Idx = 45,
     FPExt64To128Idx = 47,
+    Shift64Imm = 49
   };
 
   static bool checkPartialMap(unsigned Idx, unsigned ValStartIdx,
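
A few illustrative notes on the mechanics follow; none of the code below is part of the patch. First, earlySelectSHL leans on the fact that on AArch64 a left shift by a constant is an alias for UBFM: lsl wD, wN, #s is ubfm wD, wN, #((32 - s) & 31), #(31 - s). That is exactly the arithmetic selectShiftA_32 and selectShiftB_32 render above (the _64 variants use 64, 0x3f, and 63). A minimal standalone model with hypothetical helper names:

#include <cassert>
#include <cstdint>

// immr/imms fields of the UBFMWri that implements a 32-bit LSL-by-constant.
static uint64_t shiftA32(uint64_t S) { return (32 - S) & 0x1f; } // immr
static uint64_t shiftB32(uint64_t S) { return 31 - S; }          // imms

int main() {
  // A G_SHL of s32 by a constant 4 becomes UBFMWri dst, src, 28, 27,
  // which disassembles as lsl w<dst>, w<src>, #4.
  assert(shiftA32(4) == 28 && shiftB32(4) == 27);
  // A shift by 0 wraps immr to 0: ubfm dst, src, #0, #31 copies all 32 bits.
  assert(shiftA32(0) == 0 && shiftB32(0) == 31);
  return 0;
}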
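
Second, the legalizer and preISelLower cooperate on the shift-amount operand of scalar 32-bit shifts: a constant amount in range is widened to s64 so the imported s64-immediate patterns (and earlySelectSHL) can match it, while a non-constant s64 amount that reaches the selector is narrowed back to s32 with a sub_32 subregister copy. The two code paths can be summarized as one decision function; this is a sketch of the logic visible in the diff, and the enum and names are hypothetical:

#include <cstdint>

enum class AmtAction {
  ZExtTo64,  // legalizeShlAshrLshr: G_CONSTANT amount in range for the
             // immediate form is zero-extended to s64
  TruncTo32, // preISelLower: non-constant s64 amount over an s32 source is
             // narrowed via a sub_32 COPY for the register-register form
  Keep       // anything else is already legal as-is
};

// SrcBits is the size of the shifted value, AmtBits the size of the amount.
AmtAction classifyShiftAmt(unsigned SrcBits, unsigned AmtBits,
                           bool AmtIsGConstant, uint64_t Amount) {
  if (SrcBits != 32)
    return AmtAction::Keep; // only the scalar 32-bit case is special-cased
  if (AmtIsGConstant && AmtBits == 32 && Amount <= 31)
    return AmtAction::ZExtTo64;
  if (!AmtIsGConstant && AmtBits == 64)
    return AmtAction::TruncTo32;
  return AmtAction::Keep;
}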
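
Third, the new hooks slot into select() in a fixed order: preISelLower mutates the GMIR and never selects, earlySelect gets a chance to beat the imported patterns, and only then does selectImpl run. A toy model of that ordering; the types and names are stand-ins, not LLVM's:

#include <functional>

struct Inst { int Opcode; };

bool runSelect(Inst &I, const std::function<void(Inst &)> &PreISelLower,
               const std::function<bool(Inst &)> &EarlySelect,
               const std::function<bool(Inst &)> &SelectImpl) {
  PreISelLower(I);      // purely a transformation; selection must continue
  if (EarlySelect(I))   // manual C++ routines take priority here...
    return true;
  return SelectImpl(I); // ...over the TableGen-imported patterns
}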
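
Finally, the Shift64Imm value mapping (index 49 in ValMappings, matching the new Shift64Imm = 49 enumerator) exists precisely for the mixed-size form the legalizer now produces: for a shift with an s32 destination and source but an s64 amount, getInstrMapping assigns GPR32 to operands 0 and 1 and GPR64 to operand 2 (the three PartMappings entries added in AArch64GenRegisterBankInfo.def) instead of the usual same-kind-of-operands mapping.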

