Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFoldOperands.cpp')
-rw-r--r--   llvm/lib/Target/AMDGPU/SIFoldOperands.cpp   14
1 files changed, 9 insertions, 5 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 3d59f8d82ae..831ac5948a6 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -315,12 +315,14 @@ static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
     return;
   }
-  APInt Imm(64, OpToFold.getImm());
   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
   const TargetRegisterClass *FoldRC =
     TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);
+  APInt Imm(TII->operandBitWidth(FoldDesc.OpInfo[1].OperandType),
+            OpToFold.getImm());
+
   // Split 64-bit constants into 32-bits for folding.
   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
     unsigned UseReg = UseOp.getReg();
@@ -329,6 +331,8 @@ static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
       MRI.getRegClass(UseReg) :
       TRI.getPhysRegClass(UseReg);
+    assert(Imm.getBitWidth() == 64);
+
     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
       return;
@@ -505,7 +509,6 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
       if (!isSafeToFold(MI))
         continue;
-      unsigned OpSize = TII->getOpSize(MI, 1);
       MachineOperand &OpToFold = MI.getOperand(1);
       bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
@@ -559,14 +562,15 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
                Use = MRI.use_begin(Dst.getReg()), E = MRI.use_end();
              Use != E; ++Use) {
           MachineInstr *UseMI = Use->getParent();
+          unsigned OpNo = Use.getOperandNo();
-          if (TII->isInlineConstant(OpToFold, OpSize)) {
-            foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
+          if (TII->isInlineConstant(*UseMI, OpNo, OpToFold)) {
+            foldOperand(OpToFold, UseMI, OpNo, FoldList,
                         CopiesToReplace, TII, TRI, MRI);
           } else {
             if (++NumLiteralUses == 1) {
               NonInlineUse = &*Use;
-              NonInlineUseOpNo = Use.getOperandNo();
+              NonInlineUseOpNo = OpNo;
             }
           }
         }
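
Note: the following is a minimal standalone sketch, not part of the patch above. It illustrates the 64-bit split that the newly added assert(Imm.getBitWidth() == 64) guards: the "Split 64-bit constants into 32-bits for folding" path consumes the immediate as two 32-bit halves. The helper name splitImm64 is hypothetical; only the llvm::APInt calls (trunc, lshr, getZExtValue) are real API.

// Sketch only: split a 64-bit immediate into two 32-bit halves with llvm::APInt,
// the way the sub-register folding path does after asserting the bit width.
#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>
#include <utility>

// Hypothetical helper, not present in SIFoldOperands.cpp.
static std::pair<uint32_t, uint32_t> splitImm64(const llvm::APInt &Imm) {
  assert(Imm.getBitWidth() == 64 && "folded immediate must be 64 bits wide");
  // Low 32 bits of the constant.
  uint32_t Lo = static_cast<uint32_t>(Imm.trunc(32).getZExtValue());
  // High 32 bits of the constant.
  uint32_t Hi = static_cast<uint32_t>(Imm.lshr(32).trunc(32).getZExtValue());
  return {Lo, Hi};
}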