Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/immv216.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/immv216.ll | 193
1 file changed, 118 insertions(+), 75 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/immv216.ll b/llvm/test/CodeGen/AMDGPU/immv216.ll
index 4844060feba..3227633496a 100644
--- a/llvm/test/CodeGen/AMDGPU/immv216.ll
+++ b/llvm/test/CodeGen/AMDGPU/immv216.ll
@@ -1,6 +1,6 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; FIXME: Merge into imm.ll
; GCN-LABEL: {{^}}store_inline_imm_neg_0.0_v2i16:
@@ -120,11 +120,14 @@ define amdgpu_kernel void @store_literal_imm_v2f16(<2 x half> addrspace(1)* %out
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST0:v[0-9]+]], 0
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST0]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST0]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_0.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -138,11 +141,14 @@ define amdgpu_kernel void @add_inline_imm_0.0_v2f16(<2 x half> addrspace(1)* %ou
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 0.5 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 0.5, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST05:v[0-9]+]], 0x3800
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST05]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST05]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 0.5
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_0.5_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -156,11 +162,14 @@ define amdgpu_kernel void @add_inline_imm_0.5_v2f16(<2 x half> addrspace(1)* %ou
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -0.5 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -0.5, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONSTM05:v[0-9]+]], 0xb800
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONSTM05]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONSTM05]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], -0.5
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_neg_0.5_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -174,11 +183,14 @@ define amdgpu_kernel void @add_inline_imm_neg_0.5_v2f16(<2 x half> addrspace(1)*
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 1.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1.0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST1:v[0-9]+]], 0x3c00
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 1.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_1.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -192,11 +204,15 @@ define amdgpu_kernel void @add_inline_imm_1.0_v2f16(<2 x half> addrspace(1)* %ou
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -1.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -1.0, [[VAL0]]
-; VI-DAG: v_mov_b32_e32 [[CONSTM1:v[0-9]+]], 0xbc00
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONSTM1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
+; VI-DAG: v_mov_b32_e32 [[CONST1:v[0-9]+]], 0xbc00
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], -1.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_neg_1.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -210,11 +226,14 @@ define amdgpu_kernel void @add_inline_imm_neg_1.0_v2f16(<2 x half> addrspace(1)*
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 2.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2.0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST2:v[0-9]+]], 0x4000
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 2.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_2.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -228,11 +247,14 @@ define amdgpu_kernel void @add_inline_imm_2.0_v2f16(<2 x half> addrspace(1)* %ou
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -2.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -2.0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONSTM2:v[0-9]+]], 0xc000
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONSTM2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONSTM2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], -2.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_neg_2.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -246,11 +268,14 @@ define amdgpu_kernel void @add_inline_imm_neg_2.0_v2f16(<2 x half> addrspace(1)*
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 4.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 4.0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST4:v[0-9]+]], 0x4400
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST4]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST4]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 4.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_4.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -264,11 +289,14 @@ define amdgpu_kernel void @add_inline_imm_4.0_v2f16(<2 x half> addrspace(1)* %ou
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], -4.0 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, -4.0, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONSTM4:v[0-9]+]], 0xc400
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONSTM4]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONSTM4]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], -4.0
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_neg_4.0_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -321,11 +349,14 @@ define amdgpu_kernel void @commute_add_literal_v2f16(<2 x half> addrspace(1)* %o
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 1 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 1, [[VAL0]]
-; VI-DAG: v_mov_b32_e32 [[CONST1:v[0-9]+]], 1
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
+; VI-DAG: v_mov_b32_e32 [[CONST1:v[0-9]+]], 1{{$}}
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 1{{$}}
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_1_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -339,11 +370,15 @@ define amdgpu_kernel void @add_inline_imm_1_v2f16(<2 x half> addrspace(1)* %out,
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 2 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 2, [[VAL0]]
-; VI-DAG: v_mov_b32_e32 [[CONST2:v[0-9]+]], 2
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
+; VI-DAG: v_mov_b32_e32 [[CONST2:v[0-9]+]], 2{{$}}
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST2]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 2{{$}}
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_2_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -357,11 +392,15 @@ define amdgpu_kernel void @add_inline_imm_2_v2f16(<2 x half> addrspace(1)* %out,
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 16 op_sel_hi:[1,0]{{$}}
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 16, [[VAL0]]
-; VI-DAG: v_mov_b32_e32 [[CONST16:v[0-9]+]], 16
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
+; VI-DAG: v_mov_b32_e32 [[CONST16:v[0-9]+]], 16{{$}}
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST16]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 16{{$}}
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_16_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -375,10 +414,9 @@ define amdgpu_kernel void @add_inline_imm_16_v2f16(<2 x half> addrspace(1)* %out
; GFX9: v_mov_b32_e32 [[REG:v[0-9]+]], [[VAL]]
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI: v_or_b32_e32 [[REG:v[0-9]+]]
-; VI: v_add_u32_e32 [[REG]], vcc, -1, [[REG]]
+; VI: s_load_dword [[VAL:s[0-9]+]]
+; VI: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], -1{{$}}
+; VI: v_mov_b32_e32 [[REG:v[0-9]+]], [[ADD]]
; VI: buffer_store_dword [[REG]]
define amdgpu_kernel void @add_inline_imm_neg_1_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
%xbc = bitcast <2 x half> %x to i32
@@ -393,10 +431,9 @@ define amdgpu_kernel void @add_inline_imm_neg_1_v2f16(<2 x half> addrspace(1)* %
; GFX9: v_mov_b32_e32 [[REG:v[0-9]+]], [[VAL]]
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI: v_or_b32_e32 [[REG:v[0-9]+]]
-; VI: v_add_u32_e32 [[REG]], vcc, 0xfffefffe, [[REG]]
+; VI: s_load_dword [[VAL:s[0-9]+]]
+; VI: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], 0xfffefffe{{$}}
+; VI: v_mov_b32_e32 [[REG:v[0-9]+]], [[ADD]]
; VI: buffer_store_dword [[REG]]
define amdgpu_kernel void @add_inline_imm_neg_2_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
%xbc = bitcast <2 x half> %x to i32
@@ -411,10 +448,10 @@ define amdgpu_kernel void @add_inline_imm_neg_2_v2f16(<2 x half> addrspace(1)* %
; GFX9: v_mov_b32_e32 [[REG:v[0-9]+]], [[VAL]]
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI: v_or_b32_e32 [[REG:v[0-9]+]]
-; VI: v_add_u32_e32 [[REG]], vcc, 0xfff0fff0, [[REG]]
+
+; VI: s_load_dword [[VAL:s[0-9]+]]
+; VI: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], 0xfff0fff0{{$}}
+; VI: v_mov_b32_e32 [[REG:v[0-9]+]], [[ADD]]
; VI: buffer_store_dword [[REG]]
define amdgpu_kernel void @add_inline_imm_neg_16_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
%xbc = bitcast <2 x half> %x to i32
@@ -429,11 +466,14 @@ define amdgpu_kernel void @add_inline_imm_neg_16_v2f16(<2 x half> addrspace(1)*
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 63
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 63, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST63:v[0-9]+]], 63
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST63]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST63]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 63
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_63_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {
@@ -447,11 +487,14 @@ define amdgpu_kernel void @add_inline_imm_63_v2f16(<2 x half> addrspace(1)* %out
; GFX9: v_pk_add_f16 [[REG:v[0-9]+]], [[VAL]], 64
; GFX9: buffer_store_dword [[REG]]
-; VI: buffer_load_ushort [[VAL0:v[0-9]+]]
-; VI: buffer_load_ushort [[VAL1:v[0-9]+]]
-; VI-DAG: v_add_f16_e32 v{{[0-9]+}}, 64, [[VAL0]]
+; FIXME: Shouldn't need right shift and SDWA, also extra copy
+; VI-DAG: s_load_dword [[VAL:s[0-9]+]]
; VI-DAG: v_mov_b32_e32 [[CONST64:v[0-9]+]], 64
-; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[VAL1]], [[CONST64]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
+; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
+
+; VI-DAG: v_add_f16_sdwa v{{[0-9]+}}, [[V_SHR]], [[CONST64]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-DAG: v_add_f16_e64 v{{[0-9]+}}, [[VAL]], 64
; VI: v_or_b32
; VI: buffer_store_dword
define amdgpu_kernel void @add_inline_imm_64_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %x) #0 {