Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll | 190 |
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll | 160 |
2 files changed, 334 insertions, 16 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
index 737be5d0044..c81a0a48726 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fcmp.ll
@@ -1,10 +1,13 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32) #0
 declare i64 @llvm.amdgcn.fcmp.f64(double, double, i32) #0
 declare float @llvm.fabs.f32(float) #0
 
+declare i64 @llvm.amdgcn.fcmp.f16(half, half, i32) #0
+declare half @llvm.fabs.f16(half) #0
+
 ; GCN-LABEL: {{^}}v_fcmp_f32_dynamic_cc:
 ; GCN: s_endpgm
 define amdgpu_kernel void @v_fcmp_f32_dynamic_cc(i64 addrspace(1)* %out, float %src0, float %src1, i32 %cc) {
@@ -32,9 +35,9 @@ define amdgpu_kernel void @v_fcmp_f32_oeq_both_operands_with_fabs(i64 addrspace(
   ret void
 }
 
-; GCN-LABEL: {{^}}v_fcmp:
+; GCN-LABEL: {{^}}v_fcmp_f32:
 ; GCN-NOT: v_cmp_eq_f32_e64
-define amdgpu_kernel void @v_fcmp(i64 addrspace(1)* %out, float %src) {
+define amdgpu_kernel void @v_fcmp_f32(i64 addrspace(1)* %out, float %src) {
   %result = call i64 @llvm.amdgcn.fcmp.f32(float %src, float 100.00, i32 -1)
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -233,4 +236,183 @@ define amdgpu_kernel void @v_fcmp_f64_ule(i64 addrspace(1)* %out, double %src) {
   ret void
 }
 
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq_with_fabs:
+; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}}, |{{v[0-9]+}}|
+
+; SI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
+define amdgpu_kernel void @v_fcmp_f16_oeq_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
+  %temp = call half @llvm.fabs.f16(half %a)
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half %temp, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq_both_operands_with_fabs:
+; VI: v_cmp_eq_f16_e64 {{s\[[0-9]+:[0-9]+\]}}, |{{s[0-9]+}}|, |{{v[0-9]+}}|
+
+; SI: v_cvt_f32_f16_e64 [[CVT0:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cvt_f32_f16_e64 [[CVT1:v[0-9]+]], |s{{[0-9]+}}|
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT0]], [[CVT1]]
+define amdgpu_kernel void @v_fcmp_f16_oeq_both_operands_with_fabs(i64 addrspace(1)* %out, half %src, half %a) {
+  %temp = call half @llvm.fabs.f16(half %a)
+  %src_input = call half @llvm.fabs.f16(half %src)
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src_input, half %temp, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16:
+; GCN-NOT: v_cmp_eq_
+define amdgpu_kernel void @v_fcmp_f16(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 -1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oeq:
+; VI: v_cmp_eq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_eq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_oeq(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 1)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_one:
+; VI: v_cmp_neq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_one(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 6)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ogt:
+; VI: v_cmp_gt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_gt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ogt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 2)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_oge:
+; VI: v_cmp_ge_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_oge(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 3)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_olt:
+; VI: v_cmp_lt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_lt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_olt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 4)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ole:
+; VI: v_cmp_le_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_le_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ole(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 5)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ueq:
+; VI: v_cmp_nlg_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nlg_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ueq(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 9)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_une:
+; VI: v_cmp_neq_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_neq_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_une(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 14)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ugt:
+; VI: v_cmp_nle_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nle_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ugt(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 10)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_uge:
+; VI: v_cmp_nlt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nlt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_uge(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 11)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ult:
+; VI: v_cmp_nge_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_nge_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ult(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 12)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fcmp_f16_ule:
+; VI: v_cmp_ngt_f16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
+; SI-DAG: v_cvt_f32_f16_e32 [[CVT:v[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ngt_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_fcmp_f16_ule(i64 addrspace(1)* %out, half %src) {
+  %result = call i64 @llvm.amdgcn.fcmp.f16(half %src, half 100.00, i32 13)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
index aa04af7a64a..584fb43fd8e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
@@ -1,8 +1,9 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0
 declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32) #0
+declare i64 @llvm.amdgcn.icmp.i16(i16, i16, i32) #0
 
 ; No crash on invalid input
 ; GCN-LABEL: {{^}}v_icmp_i32_dynamic_cc:
@@ -21,13 +22,14 @@ define amdgpu_kernel void @v_icmp_i32_eq(i64 addrspace(1)* %out, i32 %src) {
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp:
+; GCN-LABEL: {{^}}v_icmp_i32:
 ; GCN-NOT: v_cmp_eq_u32_e64
-define amdgpu_kernel void @v_icmp(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 30)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
+
 ; GCN-LABEL: {{^}}v_icmp_i32_ne:
 ; GCN: v_cmp_ne_u32_e64
 define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
@@ -36,33 +38,33 @@ define amdgpu_kernel void @v_icmp_i32_ne(i64 addrspace(1)* %out, i32 %src) {
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_ugt:
+; GCN-LABEL: {{^}}v_icmp_i32_ugt:
 ; GCN: v_cmp_gt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ugt(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ugt(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 34)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_uge:
+; GCN-LABEL: {{^}}v_icmp_i32_uge:
 ; GCN: v_cmp_ge_u32_e64
-define amdgpu_kernel void @v_icmp_u32_uge(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_uge(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 35)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_ult:
+; GCN-LABEL: {{^}}v_icmp_i32_ult:
 ; GCN: v_cmp_lt_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ult(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ult(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 36)
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_icmp_u32_ule:
+; GCN-LABEL: {{^}}v_icmp_i32_ule:
 ; GCN: v_cmp_le_u32_e64
-define amdgpu_kernel void @v_icmp_u32_ule(i64 addrspace(1)* %out, i32 %src) {
+define amdgpu_kernel void @v_icmp_i32_ule(i64 addrspace(1)* %out, i32 %src) {
   %result = call i64 @llvm.amdgcn.icmp.i32(i32 %src, i32 100, i32 37)
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -178,4 +180,138 @@ define amdgpu_kernel void @v_icmp_i64_sle(i64 addrspace(1)* %out, i64 %src) {
   ret void
 }
 
+; GCN-LABEL: {{^}}v_icmp_i16_dynamic_cc:
+; GCN: s_endpgm
+define amdgpu_kernel void @v_icmp_i16_dynamic_cc(i64 addrspace(1)* %out, i16 %src, i32 %cc) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 %cc)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_eq:
+; VI: v_cmp_eq_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_eq_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_eq(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 32)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16:
+; GCN-NOT: v_cmp_eq_
+define amdgpu_kernel void @v_icmp_i16(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 30)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+; GCN-LABEL: {{^}}v_icmp_i16_ne:
+; VI: v_cmp_ne_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_ne_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ne(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 33)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ugt:
+; VI: v_cmp_gt_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_gt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ugt(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 34)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_uge:
+; VI: v_cmp_ge_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_ge_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_uge(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 35)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ult:
+; VI: v_cmp_lt_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_lt_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ult(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 36)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_ule:
+; VI: v_cmp_le_u16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_and_b32 [[CVT:s[0-9]+]], s{{[0-9]+}}, 0xffff{{$}}
+; SI: v_cmp_le_u32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_ule(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 37)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_sgt:
+; VI: v_cmp_gt_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_gt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sgt(i64 addrspace(1)* %out, i16 %src) #1 {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 38)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_sge:
+; VI: v_cmp_ge_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_ge_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sge(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 39)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_icmp_i16_slt:
+; VI: v_cmp_lt_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_lt_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_slt(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 40)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+; GCN-LABEL: {{^}}v_icmp_i16_sle:
+; VI: v_cmp_le_i16_e64
+
+; SI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x64
+; SI-DAG: s_sext_i32_i16 [[CVT:s[0-9]+]], s{{[0-9]+}}
+; SI: v_cmp_le_i32_e64 s{{\[[0-9]+:[0-9]+\]}}, [[CVT]], [[K]]
+define amdgpu_kernel void @v_icmp_i16_sle(i64 addrspace(1)* %out, i16 %src) {
+  %result = call i64 @llvm.amdgcn.icmp.i16(i16 %src, i16 100, i32 41)
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind readnone convergent }