| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-12-22 03:05:44 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2016-12-22 03:05:44 +0000 |
| commit | cdff21b14ec687e1e8b855d2ba92f68a5e037a26 | |
| tree | 3b782c40be82859e54cedc0ad1c159875cc4ff58 /llvm/test/CodeGen/AMDGPU | |
| parent | 4052a576c05a9d11b7f7ac354db901275101003b | |
AMDGPU: Allow rcp and rsq usage with f16
llvm-svn: 290302
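
For context, here is a minimal sketch of the patterns this commit affects (the function names below are illustrative, not from the patch): a reciprocal is an `fdiv` of 1.0, and a reciprocal square root is an `fdiv` of 1.0 by an `llvm.sqrt` result. Before this change the f16 forms were promoted through f32; with it they can select directly to `v_rcp_f16` / `v_rsq_f16` on subtargets with 16-bit instructions. Exactly when the rcp/rsq form is used depends on fast-math flags and the fp16 denormal mode, which the updated RUN lines below exercise.

```llvm
; Illustrative sketch only: f16 patterns that can now select to
; v_rcp_f16 / v_rsq_f16 (e.g. with -march=amdgcn -mcpu=fiji) instead of
; being promoted through f32.
declare half @llvm.sqrt.f16(half)

define half @example_rcp(half %x) {
  ; 1.0 / x is a reciprocal; the arcp flag permits the rcp instruction.
  %rcp = fdiv arcp half 1.0, %x
  ret half %rcp
}

define half @example_rsq(half %x) {
  ; 1.0 / sqrt(x) is a reciprocal square root.
  %sqrt = call half @llvm.sqrt.f16(half %x)
  %rsq = fdiv arcp half 1.0, %sqrt
  ret half %rsq
}
```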
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/fdiv.f16.ll | 190 |
1 file changed, 180 insertions, 10 deletions
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index da791f7e665..fad45c6816d 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1,9 +1,10 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 ; Make sure fdiv is promoted to f32.
 
-; GCN-LABEL: {{^}}fdiv_f16
+; GCN-LABEL: {{^}}v_fdiv_f16
 ; SI: v_cvt_f32_f16
 ; SI: v_cvt_f32_f16
 ; SI: v_div_scale_f32
@@ -19,8 +20,8 @@
 ; SI: v_div_fixup_f32
 ; SI: v_cvt_f16_f32
 
-; VI: buffer_load_ushort [[LHS:v[0-9]+]]
-; VI: buffer_load_ushort [[RHS:v[0-9]+]]
+; VI: flat_load_ushort [[LHS:v[0-9]+]]
+; VI: flat_load_ushort [[RHS:v[0-9]+]]
 
 ; VI-DAG: v_cvt_f32_f16_e32 [[CVT_LHS:v[0-9]+]], [[LHS]]
 ; VI-DAG: v_cvt_f32_f16_e32 [[CVT_RHS:v[0-9]+]], [[RHS]]
@@ -29,15 +30,184 @@
 ; VI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[RCP_RHS]], [[CVT_LHS]]
 ; VI: v_cvt_f16_f32_e32 [[CVT_BACK:v[0-9]+]], [[MUL]]
 ; VI: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK]], [[RHS]], [[LHS]]
-; VI: buffer_store_short [[RESULT]]
-define void @fdiv_f16(
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_fdiv_f16(
     half addrspace(1)* %r,
     half addrspace(1)* %a,
-    half addrspace(1)* %b) {
+    half addrspace(1)* %b) #0 {
 entry:
-  %a.val = load volatile half, half addrspace(1)* %a
-  %b.val = load volatile half, half addrspace(1)* %b
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %a.val = load volatile half, half addrspace(1)* %gep.a
+  %b.val = load volatile half, half addrspace(1)* %gep.b
   %r.val = fdiv half %a.val, %b.val
-  store half %r.val, half addrspace(1)* %r
+  store half %r.val, half addrspace(1)* %gep.r
   ret void
 }
+
+; GCN-LABEL: {{^}}v_rcp_f16:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rcp_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %r.val = fdiv half 1.0, %b.val
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_rcp_f16_abs:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_rcp_f16_e64 [[RESULT:v[0-9]+]], |[[VAL]]|
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rcp_f16_abs(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %b.abs = call half @llvm.fabs.f16(half %b.val)
+  %r.val = fdiv half 1.0, %b.abs
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_rcp_f16_arcp:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rcp_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %r.val = fdiv arcp half 1.0, %b.val
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_rcp_f16_neg:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[VAL]]
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rcp_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %r.val = fdiv half -1.0, %b.val
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_rsq_f16:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_rsq_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rsq_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
+  %r.val = fdiv half 1.0, %b.sqrt
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_rsq_f16_neg:
+; VI: flat_load_ushort [[VAL:v[0-9]+]]
+; VI-NOT: [[VAL]]
+; VI: v_sqrt_f16_e32 [[SQRT:v[0-9]+]], [[VAL]]
+; VI-NEXT: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[SQRT]]
+; VI-NOT: [[RESULT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_rsq_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
+  %r.val = fdiv half -1.0, %b.sqrt
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fdiv_f16_arcp:
+; VI: flat_load_ushort [[LHS:v[0-9]+]]
+; VI: flat_load_ushort [[RHS:v[0-9]+]]
+
+; VI: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
+; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_fdiv_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %a.val = load volatile half, half addrspace(1)* %gep.a
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %r.val = fdiv arcp half %a.val, %b.val
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_fdiv_f16_unsafe:
+; VI: flat_load_ushort [[LHS:v[0-9]+]]
+; VI: flat_load_ushort [[RHS:v[0-9]+]]
+
+; VI: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
+; VI: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[RCP]], [[LHS]]
+
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
+  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
+  %a.val = load volatile half, half addrspace(1)* %gep.a
+  %b.val = load volatile half, half addrspace(1)* %gep.b
+  %r.val = fdiv half %a.val, %b.val
+  store half %r.val, half addrspace(1)* %gep.r
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare half @llvm.sqrt.f16(half) #1
+declare half @llvm.fabs.f16(half) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "unsafe-fp-math"="true" }
```

