diff options
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/imm16.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/imm16.ll | 66 |
1 file changed, 33 insertions, 33 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/imm16.ll b/llvm/test/CodeGen/AMDGPU/imm16.ll index 2e73eb06502..e42d5879189 100644 --- a/llvm/test/CodeGen/AMDGPU/imm16.ll +++ b/llvm/test/CodeGen/AMDGPU/imm16.ll @@ -7,7 +7,7 @@ ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x8000{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) { store volatile i16 -32768, i16 addrspace(1)* %out ret void } @@ -15,7 +15,7 @@ define void @store_inline_imm_neg_0.0_i16(i16 addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_inline_imm_0.0_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_0.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_0.0_f16(half addrspace(1)* %out) { store half 0.0, half addrspace(1)* %out ret void } @@ -24,7 +24,7 @@ define void @store_inline_imm_0.0_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x8000{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffff8000{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_imm_neg_0.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_imm_neg_0.0_f16(half addrspace(1)* %out) { store half -0.0, half addrspace(1)* %out ret void } @@ -32,7 +32,7 @@ define void @store_imm_neg_0.0_f16(half addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_inline_imm_0.5_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3800{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_0.5_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_0.5_f16(half addrspace(1)* %out) { store half 0.5, half addrspace(1)* %out ret void } @@ -41,7 +41,7 @@ define void @store_inline_imm_0.5_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb800{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffb800{{$}} ; GCN: buffer_store_short 
[[REG]] -define void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) { store half -0.5, half addrspace(1)* %out ret void } @@ -49,7 +49,7 @@ define void @store_inline_imm_m_0.5_f16(half addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_inline_imm_1.0_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3c00{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_1.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_1.0_f16(half addrspace(1)* %out) { store half 1.0, half addrspace(1)* %out ret void } @@ -58,7 +58,7 @@ define void @store_inline_imm_1.0_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbc00{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffbc00{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) { store half -1.0, half addrspace(1)* %out ret void } @@ -66,7 +66,7 @@ define void @store_inline_imm_m_1.0_f16(half addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_inline_imm_2.0_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4000{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_2.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_2.0_f16(half addrspace(1)* %out) { store half 2.0, half addrspace(1)* %out ret void } @@ -75,7 +75,7 @@ define void @store_inline_imm_2.0_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc000{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffc000{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) { store half -2.0, half addrspace(1)* %out ret void } @@ -83,7 +83,7 @@ define void @store_inline_imm_m_2.0_f16(half addrspace(1)* %out) { ; GCN-LABEL: 
{{^}}store_inline_imm_4.0_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x4400{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_4.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_4.0_f16(half addrspace(1)* %out) { store half 4.0, half addrspace(1)* %out ret void } @@ -92,7 +92,7 @@ define void @store_inline_imm_4.0_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xc400{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffc400{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) { store half -4.0, half addrspace(1)* %out ret void } @@ -101,7 +101,7 @@ define void @store_inline_imm_m_4.0_f16(half addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_inline_imm_inv_2pi_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3118{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) { store half 0xH3118, half addrspace(1)* %out ret void } @@ -110,7 +110,7 @@ define void @store_inline_imm_inv_2pi_f16(half addrspace(1)* %out) { ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xb118{{$}} ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xffffb118{{$}} ; GCN: buffer_store_short [[REG]] -define void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) { store half 0xHB118, half addrspace(1)* %out ret void } @@ -118,7 +118,7 @@ define void @store_inline_imm_m_inv_2pi_f16(half addrspace(1)* %out) { ; GCN-LABEL: {{^}}store_literal_imm_f16: ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x6c00 ; GCN: buffer_store_short [[REG]] -define void @store_literal_imm_f16(half addrspace(1)* %out) { +define amdgpu_kernel void @store_literal_imm_f16(half addrspace(1)* %out) { store half 4096.0, half addrspace(1)* %out ret 
void } @@ -127,7 +127,7 @@ define void @store_literal_imm_f16(half addrspace(1)* %out) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0.0 store half %y, half addrspace(1)* %out ret void @@ -137,7 +137,7 @@ define void @add_inline_imm_0.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0.5 store half %y, half addrspace(1)* %out ret void @@ -147,7 +147,7 @@ define void @add_inline_imm_0.5_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -0.5, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, -0.5 store half %y, half addrspace(1)* %out ret void @@ -157,7 +157,7 @@ define void @add_inline_imm_neg_0.5_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 1.0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 1.0 store half %y, half addrspace(1)* %out ret void @@ -167,7 +167,7 @@ define void @add_inline_imm_1.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -1.0, 
[[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, -1.0 store half %y, half addrspace(1)* %out ret void @@ -177,7 +177,7 @@ define void @add_inline_imm_neg_1.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 2.0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 2.0 store half %y, half addrspace(1)* %out ret void @@ -187,7 +187,7 @@ define void @add_inline_imm_2.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -2.0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, -2.0 store half %y, half addrspace(1)* %out ret void @@ -197,7 +197,7 @@ define void @add_inline_imm_neg_2.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 4.0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 4.0 store half %y, half addrspace(1)* %out ret void @@ -207,7 +207,7 @@ define void @add_inline_imm_4.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -4.0, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void 
@add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, -4.0 store half %y, half addrspace(1)* %out ret void @@ -217,7 +217,7 @@ define void @add_inline_imm_neg_4.0_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0.5, [[VAL]] ; VI: buffer_store_short [[REG]] -define void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrspace(1)* %in) { +define amdgpu_kernel void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrspace(1)* %in) { %x = load half, half addrspace(1)* %in %y = fadd half %x, 0.5 store half %y, half addrspace(1)* %out @@ -228,7 +228,7 @@ define void @commute_add_inline_imm_0.5_f16(half addrspace(1)* %out, half addrsp ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 0x6400, [[VAL]] ; VI: buffer_store_short [[REG]] -define void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)* %in) { +define amdgpu_kernel void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)* %in) { %x = load half, half addrspace(1)* %in %y = fadd half %x, 1024.0 store half %y, half addrspace(1)* %out @@ -239,7 +239,7 @@ define void @commute_add_literal_f16(half addrspace(1)* %out, half addrspace(1)* ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 1, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xH0001 store half %y, half addrspace(1)* %out ret void @@ -249,7 +249,7 @@ define void @add_inline_imm_1_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 2, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_2_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_2_f16(half 
addrspace(1)* %out, half %x) { %y = fadd half %x, 0xH0002 store half %y, half addrspace(1)* %out ret void @@ -259,7 +259,7 @@ define void @add_inline_imm_2_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 16, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xH0010 store half %y, half addrspace(1)* %out ret void @@ -269,7 +269,7 @@ define void @add_inline_imm_16_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -1, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xHFFFF store half %y, half addrspace(1)* %out ret void @@ -279,7 +279,7 @@ define void @add_inline_imm_neg_1_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -2, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xHFFFE store half %y, half addrspace(1)* %out ret void @@ -289,7 +289,7 @@ define void @add_inline_imm_neg_2_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], -16, [[VAL]]{{$}} ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xHFFF0 store half %y, half addrspace(1)* %out ret void @@ -299,7 +299,7 @@ define void 
@add_inline_imm_neg_16_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 63, [[VAL]] ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xH003F store half %y, half addrspace(1)* %out ret void @@ -309,7 +309,7 @@ define void @add_inline_imm_63_f16(half addrspace(1)* %out, half %x) { ; VI: buffer_load_ushort [[VAL:v[0-9]+]] ; VI: v_add_f16_e32 [[REG:v[0-9]+]], 64, [[VAL]] ; VI: buffer_store_short [[REG]] -define void @add_inline_imm_64_f16(half addrspace(1)* %out, half %x) { +define amdgpu_kernel void @add_inline_imm_64_f16(half addrspace(1)* %out, half %x) { %y = fadd half %x, 0xH0040 store half %y, half addrspace(1)* %out ret void |