Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir | 293 |
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/omod.ll | 286 |
2 files changed, 577 insertions, 2 deletions
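The new tests below target SIFoldOperands' handling of the VOP3 output modifier (omod), which scales an instruction's result by 2, 4, or 0.5 in the instruction itself, and of the clamp bit, which saturates the result to [0, 1]. As a minimal sketch of the pattern under test, in the style of the new omod.ll below (the function name here is illustrative, not part of the commit):

; The trailing multiply by 0.5 can fold into the add as an omod,
; producing "v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 div:2", provided the
; add has no other users, signed zeros can be ignored, and the IEEE
; bit is clear (amdgpu_ps entry point rather than a compute kernel).
define amdgpu_ps void @omod_fold_sketch(float %a) #0 {
  %add = fadd float %a, 1.0
  %div2 = fmul float %add, 0.5
  store float %div2, float addrspace(1)* undef
  ret void
}

attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }

This is why the amdgpu_kernel variants and the signed-zeros variants in omod.ll check that no folding happens, while the amdgpu_ps variants check for the folded single-instruction form.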
diff --git a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
index 73daa5fabc1..fbfd0fbf930 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -1,13 +1,31 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
 --- |
-  define amdgpu_kernel void @v_max_self_clamp_not_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+  define amdgpu_ps void @v_max_self_clamp_not_set_f32() #0 {
     ret void
   }
 
-  define amdgpu_kernel void @v_clamp_omod_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+  define amdgpu_ps void @v_clamp_omod_already_set_f32() #0 {
     ret void
   }
+
+  define amdgpu_ps void @v_omod_mul_omod_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_mul_clamp_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_add_omod_already_set_f32() #0 {
+    ret void
+  }
+
+  define amdgpu_ps void @v_omod_add_clamp_already_set_f32() #0 {
+    ret void
+  }
+
+  attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
+
 ...
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
@@ -133,3 +151,274 @@ body: |
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
     S_ENDPGM
 ...
+---
+# Don't fold a mul that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_mul_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+name: v_omod_mul_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold a mul that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+
+name: v_omod_mul_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod if it already has omod set
+
+# GCN-LABEL: name: v_omod_add_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+name: v_omod_add_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod if it already has clamp set
+# This might be OK, but would require folding the clamp at the same time.
+# GCN-LABEL: name: v_omod_add_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+
+name: v_omod_add_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/omod.ll b/llvm/test/CodeGen/AMDGPU/omod.ll
new file mode 100644
index 00000000000..d48956f534e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/omod.ll
@@ -0,0 +1,286 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; IEEE bit enabled for compute kernel, so shouldn't use omod.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_signed_zeros:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_signed_zeros(float addrspace(1)* %out, float addrspace(1)* %aptr) #4 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; IEEE bit enabled for compute kernel, so shouldn't use omod even though nsz is allowed.
+; GCN-LABEL: {{^}}v_omod_div2_f32_enable_ieee_nsz:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_enable_ieee_nsz(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Even without the IEEE bit, omod can't be used when signed zeros are significant.
+; GCN-LABEL: {{^}}v_omod_div2_f32_signed_zeros:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_signed_zeros(float %a) #4 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:2{{$}}
+define amdgpu_ps void @v_omod_mul2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 2.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_multi_use_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 4.0, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul4_multi_use_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  store volatile float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_dbg_use_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 mul:4{{$}}
+define amdgpu_ps void @v_omod_mul4_dbg_use_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
+  %div2 = fmul float %add, 4.0
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; Clamp is applied after omod, so folding both into the instruction is OK.
+; GCN-LABEL: {{^}}v_clamp_omod_div2_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 clamp div:2{{$}}
+define amdgpu_ps void @v_clamp_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+
+  %max = call float @llvm.maxnum.f32(float %div2, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* undef
+  ret void
+}
+
+; Cannot fold omod into an instruction that already has clamp set.
+; GCN-LABEL: {{^}}v_omod_div2_clamp_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 clamp{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_clamp_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %div2 = fmul float %clamp, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_abs_src_f32:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ADD]]|, 0.5{{$}}
+define amdgpu_ps void @v_omod_div2_abs_src_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %abs.add = call float @llvm.fabs.f32(float %add)
+  %div2 = fmul float %abs.add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_self_clamp_f32:
+; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, v0 clamp{{$}}
+define amdgpu_ps void @v_omod_add_self_clamp_f32(float %a) #0 {
+  %add = fadd float %a, %a
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_clamp_self_f32:
+; GCN: v_max_f32_e64 [[CLAMP:v[0-9]+]], v0, v0 clamp{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[CLAMP]], [[CLAMP]]{{$}}
+define amdgpu_ps void @v_omod_add_clamp_self_f32(float %a) #0 {
+  %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %add = fadd float %clamp, %clamp
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_self_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_abs_self_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %abs.x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_x_x_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[X]]{{$}}
+define amdgpu_ps void @v_omod_add_abs_x_x_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_x_abs_x_f32:
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, v0
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[X]], |[[X]]|{{$}}
+define amdgpu_ps void @v_omod_add_x_abs_x_f32(float %a) #0 {
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %x, %abs.x
+  store float %add, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold an omod mul into an instruction that already has omod set.
+; GCN-LABEL: {{^}}v_omod_div2_omod_div2_f32:
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_omod_div2_f32(float %a) #0 {
+  %add = fadd float %a, 1.0
+  %div2.0 = fmul float %add, 0.5
+  %div2.1 = fmul float %div2.0, 0.5
+  store float %div2.1, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms are enabled.
+; GCN-LABEL: {{^}}v_omod_div2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f32_denormals(float %a) #2 {
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms are enabled, for the add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f32_denormals:
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f32_denormals(float %a) #2 {
+  %add = fadd float %a, 1.0
+  %mul2 = fadd float %add, %add
+  store float %mul2, float addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms are enabled.
+; GCN-LABEL: {{^}}v_omod_div2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_mul_f16_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_div2_f16_denormals(half %a) #0 {
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* undef
+  ret void
+}
+
+; Don't fold omod if denorms are enabled, for the add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f16_denormals:
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, v0{{$}}
+; VI: v_add_f16_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_ps void @v_omod_mul2_f16_denormals(half %a) #0 {
+  %add = fadd half %a, 1.0
+  %mul2 = fadd half %add, %add
+  store half %mul2, half addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f16_no_denormals:
+; VI-NOT: v0
+; VI: v_add_f16_e64 [[ADD:v[0-9]+]], v0, 1.0 div:2{{$}}
+define amdgpu_ps void @v_omod_div2_f16_no_denormals(half %a) #3 {
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* undef
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="+fp32-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #3 = { nounwind "target-features"="-fp64-fp16-denormals" "no-signed-zeros-fp-math"="true" }
+attributes #4 = { nounwind "no-signed-zeros-fp-math"="false" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !8}
+!8 = !DIBasicType(name: "float", size: 32, align: 32)
+!9 = !DIExpression()
+!10 = !DILocation(line: 1, column: 42, scope: !5)
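A note on reading the MIR check lines in the .mir test above: in the V_ADD_F32_e64/V_MUL_F32_e64 operand lists, the last two immediates before "implicit %exec" are the clamp and omod fields, so "0, 3" means clamp clear with omod = 3 and "1, 0" means clamp set with no omod. The omod encoding is 0 = none, 1 = mul:2, 2 = mul:4, 3 = div:2, and the literals 1065353216 and 1056964608 are the IEEE-754 bit patterns of 1.0 and 0.5. The fully folded form with both modifiers on one instruction is what the v_clamp_omod_div2_f32 check in omod.ll matches:

; GCN: v_add_f32_e64 v{{[0-9]+}}, v0, 1.0 clamp div:2{{$}}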