Diffstat (limited to 'llvm/lib')
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 14 ++++++++++++--
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp    |  9 +++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index be98720765e..4834d158d85 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -156,7 +156,8 @@ static bool updateOperand(FoldCandidate &Fold,
 
   if (Fold.isImm()) {
     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) {
-      // Set op_sel_hi on this operand or bail out if op_sel is already set.
+      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
+      // already set.
       unsigned Opcode = MI->getOpcode();
       int OpNo = MI->getOperandNo(&Old);
       int ModIdx = -1;
@@ -172,7 +173,16 @@ static bool updateOperand(FoldCandidate &Fold,
       unsigned Val = Mod.getImm();
       if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
         return false;
-      Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
+      // If upper part is all zero we do not need op_sel_hi.
+      if (!isUInt<16>(Fold.ImmToFold)) {
+        if (!(Fold.ImmToFold & 0xffff)) {
+          Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
+          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
+          Old.ChangeToImmediate(Fold.ImmToFold >> 16);
+          return true;
+        }
+        Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
+      }
     }
     Old.ChangeToImmediate(Fold.ImmToFold);
     return true;
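
The new block distinguishes three shapes of a 32-bit immediate being folded into a packed (VOP3P) operand. The sketch below is a minimal standalone illustration of that decision, not the pass code itself: PackedImmFold and foldPackedImm are made-up names, and the OP_SEL_0/OP_SEL_1 constants are hard-coded stand-ins for the SISrcMods values. Roughly, op_sel selects which 16-bit half of the source feeds the low lane of the operation and op_sel_hi which half feeds the high lane, so: a value that fits in 16 bits can keep the default op_sel_hi because the literal's high half is already zero; a value whose low half is zero can be folded as its high half with the lane selects swapped; and anything else falls back to clearing op_sel_hi, which splats the low half and matches the pre-existing splat-only behaviour.

// Standalone sketch of the folding decision for a 32-bit immediate used by a
// packed (VOP3P) operand. The constants mirror SISrcMods::OP_SEL_0/OP_SEL_1
// but are hard-coded so the example is self-contained; foldPackedImm is a
// hypothetical helper, not part of the pass.
#include <cstdint>
#include <cstdio>

constexpr unsigned OP_SEL_0 = 1u << 2; // low lane reads the high half of the source
constexpr unsigned OP_SEL_1 = 1u << 3; // high lane reads the high half of the source

struct PackedImmFold {
  uint32_t Literal; // value actually encoded as the literal
  unsigned SrcMods; // op_sel bits applied to the operand
};

// Decide how a 32-bit packed (v2i16/v2f16) immediate can be encoded,
// following the same three cases as the patch above.
PackedImmFold foldPackedImm(uint32_t Imm) {
  unsigned Mods = OP_SEL_1; // default: each lane reads its own half
  if (Imm <= 0xffffu)       // high half zero: the literal already matches
    return {Imm, Mods};
  if ((Imm & 0xffffu) == 0) {
    // Only the high half is nonzero: encode that half as the literal and
    // swap the lane selects so both lanes still see the intended values.
    return {Imm >> 16, (Mods | OP_SEL_0) & ~OP_SEL_1};
  }
  // Both halves nonzero: clearing op_sel_hi splats the low half, which is
  // only correct when the value is a splat (low half == high half).
  return {Imm, Mods & ~OP_SEL_1};
}

int main() {
  for (uint32_t Imm : {0x0000003Cu, 0x3C000000u, 0x00010001u}) {
    PackedImmFold F = foldPackedImm(Imm);
    std::printf("imm=0x%08x -> literal=0x%08x op_sel=%u op_sel_hi=%u\n",
                (unsigned)Imm, (unsigned)F.Literal,
                (F.SrcMods & OP_SEL_0) ? 1u : 0u,
                (F.SrcMods & OP_SEL_1) ? 1u : 0u);
  }
  return 0;
}

For example, 0x3C000000 (1.0 in half precision in the high lane, zero in the low lane) folds to the 16-bit literal 0x3C00 with op_sel set and op_sel_hi cleared, which is exactly the shape the SIInstrInfo change below has to accept as an inline constant.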
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index e7e1bdbd0b1..6feca38ba68 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2339,6 +2339,15 @@ bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
   }
   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
+    if (isUInt<16>(Imm)) {
+      int16_t Trunc = static_cast<int16_t>(Imm);
+      return ST.has16BitInsts() &&
+             AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
+    }
+    if (!(Imm & 0xffff)) {
+      return ST.has16BitInsts() &&
+             AMDGPU::isInlinableLiteral16(Imm >> 16, ST.hasInv2PiInlineImm());
+    }
     uint32_t Trunc = static_cast<uint32_t>(Imm);
     return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
   }
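
The two new early-outs accept a 32-bit packed immediate whenever a single 16-bit half carries the whole value; values with two nonzero halves still go through isInlinableLiteralV216, which requires a splat of an inlinable 16-bit literal. A rough standalone model of that acceptance test follows; isInlinableLiteral16Model and isInlinableV2Int16Model are made-up names, and only the integer inline range [-16, 64] is modelled, whereas the real AMDGPU::isInlinableLiteral16 also accepts a small set of FP16 constants.

// Simplified model of the new inline-constant test for packed v2i16
// immediates. Only the 16-bit integer inline range [-16, 64] is modelled;
// the real AMDGPU::isInlinableLiteral16 also accepts a set of FP16 constants.
#include <cstdint>

static bool isInlinableLiteral16Model(int16_t V) {
  return V >= -16 && V <= 64;
}

// Mirrors the structure of the new OPERAND_REG_INLINE_C_V2INT16 handling:
// low half alone, high half alone, or a splat of an inlinable 16-bit value.
static bool isInlinableV2Int16Model(int64_t Imm) {
  if (Imm >= 0 && Imm <= 0xffff) // high half zero (the isUInt<16> case)
    return isInlinableLiteral16Model(static_cast<int16_t>(Imm));
  if ((Imm & 0xffff) == 0)       // low half zero: test the high half alone
    return isInlinableLiteral16Model(static_cast<int16_t>(Imm >> 16));
  // Both halves nonzero: the pre-existing path (isInlinableLiteralV216),
  // which only accepts a splat of an inlinable 16-bit literal.
  uint32_t Trunc = static_cast<uint32_t>(Imm);
  int16_t Lo = static_cast<int16_t>(Trunc & 0xffff);
  int16_t Hi = static_cast<int16_t>(Trunc >> 16);
  return Lo == Hi && isInlinableLiteral16Model(Lo);
}

Under this model 0x00400000 (64 in the high half, zero in the low half) is accepted by the new second case, while 0x00400001 has two different nonzero halves, falls through to the splat check, and is rejected.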