summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2017-01-11 22:00:02 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2017-01-11 22:00:02 +0000
commit69e3001b84b5c8704640b32bc41252bcd081092e (patch)
treee98ccc7ef35ad1550ccdb3c6d2eefbf995251d66 /llvm/test/CodeGen
parentff69405213d6f1ca8c1dc89d814cf24600ce3ac1 (diff)
downloadbcm5719-llvm-69e3001b84b5c8704640b32bc41252bcd081092e.tar.gz
bcm5719-llvm-69e3001b84b5c8704640b32bc41252bcd081092e.zip
AMDGPU: Fix folding immediates into mac src2
Whether the fold is legal or not needs to be checked against the instruction it will be replaced with. llvm-svn: 291711
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--llvm/test/CodeGen/AMDGPU/v_mac.ll66
1 file changed, 66 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll
index 027c6381790..16aed5928b0 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll
@@ -212,5 +212,71 @@ entry:
ret void
}
+; Without special casing the inline constant check for v_mac_f32's
+; src2, this fails to fold the 1.0 into a mad.
+
+; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
+; GCN: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+define void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
+bb:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep.a = getelementptr inbounds float, float addrspace(1)* %a, i64 %tid.ext
+ %gep.b = getelementptr inbounds float, float addrspace(1)* %b, i64 %tid.ext
+ %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
+ %tmp = load volatile float, float addrspace(1)* %gep.a
+ %tmp1 = load volatile float, float addrspace(1)* %gep.b
+ %tmp2 = fadd float %tmp, %tmp
+ %tmp3 = fmul float %tmp2, 4.0
+ %tmp4 = fsub float 1.0, %tmp3
+ %tmp5 = fadd float %tmp4, %tmp1
+ %tmp6 = fadd float %tmp1, %tmp1
+ %tmp7 = fmul float %tmp6, %tmp
+ %tmp8 = fsub float 1.0, %tmp7
+ %tmp9 = fmul float %tmp8, 8.0
+ %tmp10 = fadd float %tmp5, %tmp9
+ store float %tmp10, float addrspace(1)* %gep.out
+ ret void
+}
+
+; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f16:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_ushort [[B:v[0-9]+]]
+
+; FIXME: How is this not folded?
+; SI: v_cvt_f32_f16_e32 v{{[0-9]+}}, 0x3c00
+
+; VI: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
+; VI: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
+define void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
+bb:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid.ext = sext i32 %tid to i64
+ %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
+ %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
+ %gep.out = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
+ %tmp = load volatile half, half addrspace(1)* %gep.a
+ %tmp1 = load volatile half, half addrspace(1)* %gep.b
+ %tmp2 = fadd half %tmp, %tmp
+ %tmp3 = fmul half %tmp2, 4.0
+ %tmp4 = fsub half 1.0, %tmp3
+ %tmp5 = fadd half %tmp4, %tmp1
+ %tmp6 = fadd half %tmp1, %tmp1
+ %tmp7 = fmul half %tmp6, %tmp
+ %tmp8 = fsub half 1.0, %tmp7
+ %tmp9 = fmul half %tmp8, 8.0
+ %tmp10 = fadd half %tmp5, %tmp9
+ store half %tmp10, half addrspace(1)* %gep.out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
OpenPOWER on IntegriCloud