Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td                6
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstructions.td               14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.f16.ll  116
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll       16
4 files changed, 136 insertions, 16 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index c8c0bde325f..8555db01645 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -360,10 +360,10 @@ def int_amdgcn_sffbh :
[IntrNoMem, IntrSpeculatable]
>;
-// v_mad_f32/v_mac_f32, selected regardless of denorm support.
+// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
- Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]
>;
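
Note (not part of the upstream diff): switching the result type to llvm_anyfloat_ty makes the intrinsic overloaded, so IR calls now use type-mangled names such as @llvm.amdgcn.fmad.ftz.f32 and @llvm.amdgcn.fmad.ftz.f16; the existing f32 test below is renamed accordingly. A minimal sketch of the f16 form (the function name is chosen here for illustration only):

declare half @llvm.amdgcn.fmad.ftz.f16(half, half, half)

define half @fmad_ftz_f16_example(half %a, half %b, half %c) {
  ; computes a * b + c; selected to v_mad_f16/v_mac_f16 regardless of denorm support
  %r = call half @llvm.amdgcn.fmad.ftz.f16(half %a, half %b, half %c)
  ret half %r
}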
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 87a891bb361..cf0255db561 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -704,15 +704,19 @@ multiclass FMADPat <ValueType vt, Instruction inst> {
defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;
-class FMADModsPat<Instruction inst, SDPatternOperator mad_opr> : GCNPat<
- (f32 (mad_opr (VOP3Mods f32:$src0, i32:$src0_mod),
- (VOP3Mods f32:$src1, i32:$src1_mod),
- (VOP3Mods f32:$src2, i32:$src2_mod))),
+class FMADModsPat<Instruction inst, SDPatternOperator mad_opr, ValueType Ty>
+ : GCNPat<
+ (Ty (mad_opr (VOP3Mods Ty:$src0, i32:$src0_mod),
+ (VOP3Mods Ty:$src1, i32:$src1_mod),
+ (VOP3Mods Ty:$src2, i32:$src2_mod))),
(inst $src0_mod, $src0, $src1_mod, $src1,
$src2_mod, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;
-def : FMADModsPat<V_MAD_F32, AMDGPUfmad_ftz>;
+def : FMADModsPat<V_MAD_F32, AMDGPUfmad_ftz, f32>;
+def : FMADModsPat<V_MAD_F16, AMDGPUfmad_ftz, f16> {
+ let SubtargetPredicate = Has16BitInsts;
+}
multiclass SelectPat <ValueType vt, Instruction inst> {
def : GCNPat <
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.f16.ll
new file mode 100644
index 00000000000..0f6dd3f5465
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.f16.ll
@@ -0,0 +1,116 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+
+declare half @llvm.amdgcn.fmad.ftz.f16(half %a, half %b, half %c)
+
+; GCN-LABEL: {{^}}mad_f16:
+; GFX8: v_ma{{[dc]}}_f16
+; GFX9: v_mad_legacy_f16
+define amdgpu_kernel void @mad_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_imm_a:
+; GCN: v_madmk_f16 {{v[0-9]+}}, {{v[0-9]+}}, 0x4800, {{v[0-9]+}}
+define amdgpu_kernel void @mad_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half 8.0, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_imm_b:
+; GCN: v_mov_b32_e32 [[KB:v[0-9]+]], 0x4800
+; GFX8: v_mad_f16 {{v[0-9]+}}, {{v[0-9]+}}, [[KB]],
+; GFX9: v_mad_legacy_f16 {{v[0-9]+}}, {{v[0-9]+}}, [[KB]],
+define amdgpu_kernel void @mad_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half 8.0, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_imm_c:
+; GCN: v_madak_f16 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, 0x4800{{$}}
+define amdgpu_kernel void @mad_f16_imm_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %b.val, half 8.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_neg_b:
+; GFX8: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+; GFX9: v_mad_legacy_f16 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @mad_f16_neg_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %neg.b = fsub half -0.0, %b.val
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %neg.b, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_abs_b:
+; GFX8: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+; GFX9: v_mad_legacy_f16 v{{[0-9]+}}, v{{[0-9]+}}, |v{{[0-9]+}}|, v{{[0-9]+}}
+define amdgpu_kernel void @mad_f16_abs_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %abs.b = call half @llvm.fabs.f16(half %b.val)
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %abs.b, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mad_f16_neg_abs_b:
+; GFX8: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, -|v{{[0-9]+}}|, v{{[0-9]+}}
+; GFX9: v_mad_legacy_f16 v{{[0-9]+}}, v{{[0-9]+}}, -|v{{[0-9]+}}|, v{{[0-9]+}}
+define amdgpu_kernel void @mad_f16_neg_abs_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %abs.b = call half @llvm.fabs.f16(half %b.val)
+ %neg.abs.b = fsub half -0.0, %abs.b
+ %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %neg.abs.b, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+declare half @llvm.fabs.f16(half)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
index c9c9ef167c4..792ce80f45c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
@@ -3,7 +3,7 @@
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
-declare float @llvm.amdgcn.fmad.ftz(float %a, float %b, float %c)
+declare float @llvm.amdgcn.fmad.ftz.f32(float %a, float %b, float %c)
; GCN-LABEL: {{^}}mad_f32:
; GCN: v_ma{{[dc]}}_f32
@@ -15,7 +15,7 @@ define amdgpu_kernel void @mad_f32(
%a.val = load float, float addrspace(1)* %a
%b.val = load float, float addrspace(1)* %b
%c.val = load float, float addrspace(1)* %c
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %b.val, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %b.val, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -29,7 +29,7 @@ define amdgpu_kernel void @mad_f32_imm_a(
float addrspace(1)* %c) {
%b.val = load float, float addrspace(1)* %b
%c.val = load float, float addrspace(1)* %c
- %r.val = call float @llvm.amdgcn.fmad.ftz(float 8.0, float %b.val, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float 8.0, float %b.val, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -43,7 +43,7 @@ define amdgpu_kernel void @mad_f32_imm_b(
float addrspace(1)* %c) {
%a.val = load float, float addrspace(1)* %a
%c.val = load float, float addrspace(1)* %c
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float 8.0, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float 8.0, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -57,7 +57,7 @@ define amdgpu_kernel void @mad_f32_imm_c(
float addrspace(1)* %b) {
%a.val = load float, float addrspace(1)* %a
%b.val = load float, float addrspace(1)* %b
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %b.val, float 8.0)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %b.val, float 8.0)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -73,7 +73,7 @@ define amdgpu_kernel void @mad_f32_neg_b(
%b.val = load float, float addrspace(1)* %b
%c.val = load float, float addrspace(1)* %c
%neg.b = fsub float -0.0, %b.val
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %neg.b, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.b, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -89,7 +89,7 @@ define amdgpu_kernel void @mad_f32_abs_b(
%b.val = load float, float addrspace(1)* %b
%c.val = load float, float addrspace(1)* %c
%abs.b = call float @llvm.fabs.f32(float %b.val)
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %abs.b, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %abs.b, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}
@@ -106,7 +106,7 @@ define amdgpu_kernel void @mad_f32_neg_abs_b(
%c.val = load float, float addrspace(1)* %c
%abs.b = call float @llvm.fabs.f32(float %b.val)
%neg.abs.b = fsub float -0.0, %abs.b
- %r.val = call float @llvm.amdgcn.fmad.ftz(float %a.val, float %neg.abs.b, float %c.val)
+ %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.abs.b, float %c.val)
store float %r.val, float addrspace(1)* %r
ret void
}