Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/imm.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/imm.ll | 136
1 file changed, 68 insertions(+), 68 deletions(-)
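The change is mechanical: every kernel entry point in this test is switched from the default calling convention to the explicit amdgpu_kernel calling convention, presumably so the AMDGPU backend keeps treating these functions as kernel entry points. A minimal sketch of the pattern (illustrative function name, not taken from the file):

; before: define void @example_store_imm(i32 addrspace(1)* %out) { ... }
; after: the entry point carries the amdgpu_kernel calling convention
define amdgpu_kernel void @example_store_imm(i32 addrspace(1)* %out) {
  store i32 5, i32 addrspace(1)* %out
  ret void
}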
diff --git a/llvm/test/CodeGen/AMDGPU/imm.ll b/llvm/test/CodeGen/AMDGPU/imm.ll
index b2c873ab73a..c2668a077b0 100644
--- a/llvm/test/CodeGen/AMDGPU/imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/imm.ll
@@ -5,7 +5,7 @@
 ; GCN-LABEL: {{^}}i64_imm_inline_lo:
 ; GCN: v_mov_b32_e32 v[[LO_VGPR:[0-9]+]], 5
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VGPR]]:
-define void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
 entry:
   store i64 1311768464867721221, i64 addrspace(1) *%out ; 0x1234567800000005
   ret void
@@ -15,7 +15,7 @@ entry:
 ; GCN-LABEL: {{^}}i64_imm_inline_hi:
 ; GCN: v_mov_b32_e32 v[[HI_VGPR:[0-9]+]], 5
 ; GCN: buffer_store_dwordx2 v{{\[[0-9]+:}}[[HI_VGPR]]
-define void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
 entry:
   store i64 21780256376, i64 addrspace(1) *%out ; 0x0000000512345678
   ret void
@@ -25,7 +25,7 @@ entry:
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_bfrev_b32_e32 v[[HI_VREG:[0-9]+]], 1{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
+define amdgpu_kernel void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
   store i64 -9223372036854775808, i64 addrspace(1) *%out
   ret void
 }
@@ -33,7 +33,7 @@ define void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
 ; GCN-LABEL: {{^}}store_inline_imm_neg_0.0_i32:
 ; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
   store i32 -2147483648, i32 addrspace(1)* %out
   ret void
 }
@@ -41,7 +41,7 @@ define void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_0.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
   store float 0.0, float addrspace(1)* %out
   ret void
 }
@@ -49,7 +49,7 @@ define void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_imm_neg_0.0_f32:
 ; GCN: v_bfrev_b32_e32 [[REG:v[0-9]+]], 1{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
   store float -0.0, float addrspace(1)* %out
   ret void
 }
@@ -57,7 +57,7 @@ define void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_0.5_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0.5{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
   store float 0.5, float addrspace(1)* %out
   ret void
 }
@@ -65,7 +65,7 @@ define void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_m_0.5_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -0.5{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
   store float -0.5, float addrspace(1)* %out
   ret void
 }
@@ -73,7 +73,7 @@ define void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_1.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 1.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
   store float 1.0, float addrspace(1)* %out
   ret void
 }
@@ -81,7 +81,7 @@ define void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_m_1.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -1.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
   store float -1.0, float addrspace(1)* %out
   ret void
 }
@@ -89,7 +89,7 @@ define void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_2.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 2.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
   store float 2.0, float addrspace(1)* %out
   ret void
 }
@@ -97,7 +97,7 @@ define void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_m_2.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -2.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
   store float -2.0, float addrspace(1)* %out
   ret void
 }
@@ -105,7 +105,7 @@ define void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_4.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 4.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
   store float 4.0, float addrspace(1)* %out
   ret void
 }
@@ -113,7 +113,7 @@ define void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_m_4.0_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], -4.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
   store float -4.0, float addrspace(1)* %out
   ret void
 }
@@ -123,7 +123,7 @@ define void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
 ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0x3e22f983{{$}}
 ; VI: v_mov_b32_e32 [[REG:v[0-9]+]], 0.15915494{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
   store float 0x3FC45F3060000000, float addrspace(1)* %out
   ret void
 }
@@ -131,7 +131,7 @@ define void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_inline_imm_m_inv_2pi_f32:
 ; SI: v_mov_b32_e32 [[REG:v[0-9]+]], 0xbe22f983{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
   store float 0xBFC45F3060000000, float addrspace(1)* %out
   ret void
 }
@@ -139,7 +139,7 @@ define void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
 ; GCN-LABEL: {{^}}store_literal_imm_f32:
 ; GCN: v_mov_b32_e32 [[REG:v[0-9]+]], 0x45800000
 ; GCN: buffer_store_dword [[REG]]
-define void @store_literal_imm_f32(float addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_f32(float addrspace(1)* %out) {
   store float 4096.0, float addrspace(1)* %out
   ret void
 }
@@ -148,7 +148,7 @@ define void @store_literal_imm_f32(float addrspace(1)* %out) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -158,7 +158,7 @@ define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 0.5{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0.5
   store float %y, float addrspace(1)* %out
   ret void
@@ -168,7 +168,7 @@ define void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -0.5{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, -0.5
   store float %y, float addrspace(1)* %out
   ret void
@@ -178,7 +178,7 @@ define void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 1.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 1.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -188,7 +188,7 @@ define void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -1.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, -1.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -198,7 +198,7 @@ define void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 2.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 2.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -208,7 +208,7 @@ define void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -2.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, -2.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -218,7 +218,7 @@ define void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 4.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 4.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -228,7 +228,7 @@ define void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -4.0{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, -4.0
   store float %y, float addrspace(1)* %out
   ret void
@@ -238,7 +238,7 @@ define void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
 ; GCN: buffer_load_dword [[VAL:v[0-9]+]]
 ; GCN: v_add_f32_e32 [[REG:v[0-9]+]], 0.5, [[VAL]]
 ; GCN: buffer_store_dword [[REG]]
-define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
   %x = load float, float addrspace(1)* %in
   %y = fadd float %x, 0.5
   store float %y, float addrspace(1)* %out
@@ -249,7 +249,7 @@ define void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addr
 ; GCN: buffer_load_dword [[VAL:v[0-9]+]]
 ; GCN: v_add_f32_e32 [[REG:v[0-9]+]], 0x44800000, [[VAL]]
 ; GCN: buffer_store_dword [[REG]]
-define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
+define amdgpu_kernel void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
   %x = load float, float addrspace(1)* %in
   %y = fadd float %x, 1024.0
   store float %y, float addrspace(1)* %out
@@ -260,7 +260,7 @@ define void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 1{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0x36a0000000000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -270,7 +270,7 @@ define void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 2{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0x36b0000000000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -280,7 +280,7 @@ define void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 16
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0x36e0000000000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -290,7 +290,7 @@ define void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -1{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0xffffffffe0000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -300,7 +300,7 @@ define void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -2{{$}}
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0xffffffffc0000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -310,7 +310,7 @@ define void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], -16
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0xfffffffe00000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -320,7 +320,7 @@ define void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 63
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0x36ff800000000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -330,7 +330,7 @@ define void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
 ; GCN: s_load_dword [[VAL:s[0-9]+]]
 ; GCN: v_add_f32_e64 [[REG:v[0-9]+]], [[VAL]], 64
 ; GCN: buffer_store_dword [[REG]]
-define void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
+define amdgpu_kernel void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0x3700000000000000
   store float %y, float addrspace(1)* %out
   ret void
@@ -342,7 +342,7 @@ define void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0{{$}}
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -353,7 +353,7 @@ define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0.5
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0.5
   store double %y, double addrspace(1)* %out
   ret void
@@ -364,7 +364,7 @@ define void @add_inline_imm_0.5_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -0.5
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, -0.5
   store double %y, double addrspace(1)* %out
   ret void
@@ -375,7 +375,7 @@ define void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 1.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 1.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -386,7 +386,7 @@ define void @add_inline_imm_1.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -1.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, -1.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -397,7 +397,7 @@ define void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 2.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 2.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -408,7 +408,7 @@ define void @add_inline_imm_2.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -2.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, -2.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -419,7 +419,7 @@ define void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 4.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 4.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -430,7 +430,7 @@ define void @add_inline_imm_4.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -4.0
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, -4.0
   store double %y, double addrspace(1)* %out
   ret void
@@ -445,7 +445,7 @@ define void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; VI: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 0.15915494{{$}}
 ; VI: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x3fc45f306dc9c882
   store double %y, double addrspace(1)* %out
   ret void
@@ -455,7 +455,7 @@ define void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, double %x) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfc45f30
 ; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0xbfc45f306dc9c882
   store double %y, double addrspace(1)* %out
   ret void
@@ -466,7 +466,7 @@ define void @add_m_inv_2pi_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 1{{$}}
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x0000000000000001
   store double %y, double addrspace(1)* %out
   ret void
@@ -477,7 +477,7 @@ define void @add_inline_imm_1_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 2{{$}}
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x0000000000000002
   store double %y, double addrspace(1)* %out
   ret void
@@ -488,7 +488,7 @@ define void @add_inline_imm_2_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 16
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x0000000000000010
   store double %y, double addrspace(1)* %out
   ret void
@@ -499,7 +499,7 @@ define void @add_inline_imm_16_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -1
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0xffffffffffffffff
   store double %y, double addrspace(1)* %out
   ret void
@@ -510,7 +510,7 @@ define void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -2
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0xfffffffffffffffe
   store double %y, double addrspace(1)* %out
   ret void
@@ -521,7 +521,7 @@ define void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], -16
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0xfffffffffffffff0
   store double %y, double addrspace(1)* %out
   ret void
@@ -532,7 +532,7 @@ define void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 63
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x000000000000003F
   store double %y, double addrspace(1)* %out
   ret void
@@ -543,7 +543,7 @@ define void @add_inline_imm_63_f64(double addrspace(1)* %out, double %x) {
 ; VI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], [[VAL]], 64
 ; GCN: buffer_store_dwordx2 [[REG]]
-define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
+define amdgpu_kernel void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0x0000000000000040
   store double %y, double addrspace(1)* %out
   ret void
@@ -554,7 +554,7 @@ define void @add_inline_imm_64_f64(double addrspace(1)* %out, double %x) {
 ; GCN: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0
 ; GCN: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], v[[LO_VREG]]{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
   store double 0.0, double addrspace(1)* %out
   ret void
 }
@@ -564,7 +564,7 @@ define void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_bfrev_b32_e32 v[[HI_VREG:[0-9]+]], 1{{$}}
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
   store double -0.0, double addrspace(1)* %out
   ret void
 }
@@ -573,7 +573,7 @@ define void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3fe00000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
   store double 0.5, double addrspace(1)* %out
   ret void
 }
@@ -582,7 +582,7 @@ define void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfe00000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
   store double -0.5, double addrspace(1)* %out
   ret void
 }
@@ -591,7 +591,7 @@ define void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3ff00000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
   store double 1.0, double addrspace(1)* %out
   ret void
 }
@@ -600,7 +600,7 @@ define void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbff00000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
   store double -1.0, double addrspace(1)* %out
   ret void
 }
@@ -609,7 +609,7 @@ define void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 2.0
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
   store double 2.0, double addrspace(1)* %out
   ret void
 }
@@ -618,7 +618,7 @@ define void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], -2.0
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
   store double -2.0, double addrspace(1)* %out
   ret void
 }
@@ -627,7 +627,7 @@ define void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x40100000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
   store double 4.0, double addrspace(1)* %out
   ret void
 }
@@ -636,7 +636,7 @@ define void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xc0100000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
   store double -4.0, double addrspace(1)* %out
   ret void
 }
@@ -645,7 +645,7 @@ define void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x3fc45f30
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inv_2pi_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inv_2pi_f64(double addrspace(1)* %out) {
   store double 0x3fc45f306dc9c882, double addrspace(1)* %out
   ret void
 }
@@ -654,7 +654,7 @@ define void @store_inv_2pi_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0x6dc9c882
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0xbfc45f30
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
   store double 0xbfc45f306dc9c882, double addrspace(1)* %out
   ret void
 }
@@ -663,7 +663,7 @@ define void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
 ; GCN-DAG: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], 0{{$}}
 ; GCN-DAG: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], 0x40b00000
 ; GCN: buffer_store_dwordx2 v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
-define void @store_literal_imm_f64(double addrspace(1)* %out) {
+define amdgpu_kernel void @store_literal_imm_f64(double addrspace(1)* %out) {
   store double 4096.0, double addrspace(1)* %out
   ret void
 }