diff options
| author | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2018-04-19 21:16:50 +0000 |
|---|---|---|
| committer | Stanislav Mekhanoshin <Stanislav.Mekhanoshin@amd.com> | 2018-04-19 21:16:50 +0000 |
| commit | 160f85794de398bf488edf6b97c0bc1eb5754c80 (patch) | |
| tree | c10554e020f7b3f722a7db73bf63f0617575cba5 | |
| parent | bf26d54047fde7a750e41f0707ace9ba2517322a (diff) | |
| download | bcm5719-llvm-160f85794de398bf488edf6b97c0bc1eb5754c80.tar.gz bcm5719-llvm-160f85794de398bf488edf6b97c0bc1eb5754c80.zip | |
[AMDGPU] Use packed literals when either the lower or the upper part is zero
Differential Revision: https://reviews.llvm.org/D45790
llvm-svn: 330365
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 14 | ||||
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 9 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/add.v2i16.ll | 3 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/immv216.ll | 2 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/pk_max_f16_literal.ll | 95 | ||||
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/sub.v2i16.ll | 3 |
6 files changed, 119 insertions, 7 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index be98720765e..4834d158d85 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -156,7 +156,8 @@ static bool updateOperand(FoldCandidate &Fold, if (Fold.isImm()) { if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked) { - // Set op_sel_hi on this operand or bail out if op_sel is already set. + // Set op_sel/op_sel_hi on this operand or bail out if op_sel is + // already set. unsigned Opcode = MI->getOpcode(); int OpNo = MI->getOperandNo(&Old); int ModIdx = -1; @@ -172,7 +173,16 @@ static bool updateOperand(FoldCandidate &Fold, unsigned Val = Mod.getImm(); if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1)) return false; - Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1); + // If upper part is all zero we do not need op_sel_hi. + if (!isUInt<16>(Fold.ImmToFold)) { + if (!(Fold.ImmToFold & 0xffff)) { + Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0); + Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1); + Old.ChangeToImmediate(Fold.ImmToFold >> 16); + return true; + } + Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1); + } } Old.ChangeToImmediate(Fold.ImmToFold); return true; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index e7e1bdbd0b1..6feca38ba68 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -2339,6 +2339,15 @@ bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, } case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { + if (isUInt<16>(Imm)) { + int16_t Trunc = static_cast<int16_t>(Imm); + return ST.has16BitInsts() && + AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); + } + if (!(Imm & 0xffff)) { + return ST.has16BitInsts() && + AMDGPU::isInlinableLiteral16(Imm >> 16, ST.hasInv2PiInlineImm()); + } uint32_t Trunc = static_cast<uint32_t>(Imm); return 
AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); } diff --git a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll index aa5026aba36..c0bf7ba70a1 100644 --- a/llvm/test/CodeGen/AMDGPU/add.v2i16.ll +++ b/llvm/test/CodeGen/AMDGPU/add.v2i16.ll @@ -115,8 +115,7 @@ define amdgpu_kernel void @v_test_add_v2i16_inline_neg1(<2 x i16> addrspace(1)* } ; GCN-LABEL: {{^}}v_test_add_v2i16_inline_lo_zero_hi: -; GFX9: s_mov_b32 [[K:s[0-9]+]], 32{{$}} -; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]{{$}} +; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, 32{{$}} ; VI-NOT: v_add_u16 ; VI: v_add_u16_e32 v{{[0-9]+}}, 32, v{{[0-9]+}} diff --git a/llvm/test/CodeGen/AMDGPU/immv216.ll b/llvm/test/CodeGen/AMDGPU/immv216.ll index 714aa66d277..4844060feba 100644 --- a/llvm/test/CodeGen/AMDGPU/immv216.ll +++ b/llvm/test/CodeGen/AMDGPU/immv216.ll @@ -117,7 +117,7 @@ define amdgpu_kernel void @store_literal_imm_v2f16(<2 x half> addrspace(1)* %out ; GCN-LABEL: {{^}}add_inline_imm_0.0_v2f16: ; GFX9: s_load_dword [[VAL:s[0-9]+]] -; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0 op_sel_hi:[1,0]{{$}} +; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0{{$}} ; GFX9: buffer_store_dword [[REG]] ; VI: buffer_load_ushort [[VAL0:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/pk_max_f16_literal.ll b/llvm/test/CodeGen/AMDGPU/pk_max_f16_literal.ll new file mode 100644 index 00000000000..f19afa5df30 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/pk_max_f16_literal.ll @@ -0,0 +1,95 @@ +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_0_1: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, 1.0 op_sel:[0,1] op_sel_hi:[1,0]{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_0_1(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> 
addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xH0000, half 0xH3C00>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_1_0: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, 1.0{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_1_0(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xH3C00, half 0xH0000>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_1_1: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, 1.0 op_sel_hi:[1,0]{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_1_1(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xH3C00, half 0xH3C00>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_0_m1: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, -1.0, v{{[0-9]+}} op_sel:[1,0] op_sel_hi:[0,1]{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_0_m1(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = 
load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xH0000, half 0xHBC00>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_m1_0: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, -1.0{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_m1_0(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xHBC00, half 0xH0000>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_m1_m1: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, -1.0 op_sel_hi:[1,0]{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_m1_m1(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xHBC00, half 0xHBC00>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_pk_max_f16_literal_0_0: +; GFX9: v_pk_max_f16 v{{[0-9]+}}, v{{[0-9]+}}, 0{{$}} +define amdgpu_kernel void @test_pk_max_f16_literal_0_0(<2 x half> addrspace(1)* nocapture %arg) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = zext i32 %tmp to i64 + %tmp2 = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i64 %tmp1 + %tmp3 = load <2 x half>, <2 x half> addrspace(1)* %tmp2, align 4 + %tmp4 = 
tail call <2 x half> @llvm.maxnum.v2f16(<2 x half> %tmp3, <2 x half> <half 0xH0000, half 0xH0000>) + store <2 x half> %tmp4, <2 x half> addrspace(1)* %tmp2, align 4 + ret void +} + +declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>) +declare i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll index 4f149308e62..a608ef715c5 100644 --- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll +++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll @@ -112,8 +112,7 @@ define amdgpu_kernel void @v_test_sub_v2i16_inline_neg1(<2 x i16> addrspace(1)* } ; GCN-LABEL: {{^}}v_test_sub_v2i16_inline_lo_zero_hi: -; GFX9: s_mov_b32 [[K:s[0-9]+]], 32{{$}} -; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] +; GFX9: v_pk_sub_i16 v{{[0-9]+}}, v{{[0-9]+}}, 32{{$}} ; VI-NOT: v_subrev_i16 ; VI: v_add_u16_e32 v{{[0-9]+}}, 0xffffffe0, v{{[0-9]+}} |

