author    Alexander Timofeev <Alexander.Timofeev@amd.com>    2016-12-08 17:28:47 +0000
committer Alexander Timofeev <Alexander.Timofeev@amd.com>    2016-12-08 17:28:47 +0000
commit    18009560c59deceb2be577e0182f7016d6ee1121 (patch)
tree      05c51c8ad3b3de4f579ea884037cded642bec1b2 /llvm/test
parent    eebed6229ad000a34959f4a9794b9e009eb0227d (diff)
[AMDGPU] Scalarization of global uniform loads.
Summary: LC can currently select a scalar load for uniform memory accesses based only on the read-only memory address space. This restriction originated from the fact that in hardware prior to VI the vector and scalar caches are not coherent. With MemoryDependenceAnalysis we can check that the memory location corresponding to the memory operand of the load is not clobbered along any path from the function entry.

Reviewers: rampitec, tstellarAMD, arsenm

Subscribers: wdng, arsenm, nhaehnle

Differential Revision: https://reviews.llvm.org/D26917

llvm-svn: 289076
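As a condensed illustration of the criterion above (a minimal sketch distilled from the tests added below; the function names are illustrative and not part of the patch), run through the same command as the tests (llc -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true): a uniform load that no store can clobber on any path from the entry is expected to select to s_load_dword, while the same load preceded by a possibly aliasing store stays flat_load_dword.

; Uniform address and no store on any path from the entry can clobber %in,
; so the load is expected to be selected as an SMEM load (s_load_dword).
define amdgpu_kernel void @uniform_no_clobber(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; The preceding store may alias %in (no noalias), so MemoryDependenceAnalysis
; cannot prove the location unclobbered and the load stays a VMEM load
; (flat_load_dword).
define amdgpu_kernel void @uniform_clobbered(i32 addrspace(1)* %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
  store i32 0, i32 addrspace(1)* %out0
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out1
  ret void
}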
Diffstat (limited to 'llvm/test')
-rw-r--r--    llvm/test/CodeGen/AMDGPU/global_smrd.ll        126
-rw-r--r--    llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll     80
2 files changed, 206 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/global_smrd.ll b/llvm/test/CodeGen/AMDGPU/global_smrd.ll
new file mode 100644
index 00000000000..20890894c0c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_smrd.ll
@@ -0,0 +1,126 @@
+; RUN: llc -O2 -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s
+
+; uniform loads
+; CHECK-LABEL: @uniform_load
+; CHECK: s_load_dwordx4
+; CHECK-NOT: flat_load_dword
+
+define amdgpu_kernel void @uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) {
+bb:
+ %tmp2 = load float, float addrspace(1)* %arg, align 4, !tbaa !8
+ %tmp3 = fadd float %tmp2, 0.000000e+00
+ %tmp4 = getelementptr inbounds float, float addrspace(1)* %arg, i64 1
+ %tmp5 = load float, float addrspace(1)* %tmp4, align 4, !tbaa !8
+ %tmp6 = fadd float %tmp3, %tmp5
+ %tmp7 = getelementptr inbounds float, float addrspace(1)* %arg, i64 2
+ %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !tbaa !8
+ %tmp9 = fadd float %tmp6, %tmp8
+ %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i64 3
+ %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
+ %tmp12 = fadd float %tmp9, %tmp11
+ %tmp13 = getelementptr inbounds float, float addrspace(1)* %arg1
+ store float %tmp12, float addrspace(1)* %tmp13, align 4, !tbaa !8
+ ret void
+}
+
+; non-uniform loads
+; CHECK-LABEL: @non-uniform_load
+; CHECK: flat_load_dword
+; CHECK-NOT: s_load_dwordx4
+
+define amdgpu_kernel void @non-uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) #0 {
+bb:
+ %tmp = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %tmp2 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp
+ %tmp3 = load float, float addrspace(1)* %tmp2, align 4, !tbaa !8
+ %tmp4 = fadd float %tmp3, 0.000000e+00
+ %tmp5 = add i32 %tmp, 1
+ %tmp6 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp5
+ %tmp7 = load float, float addrspace(1)* %tmp6, align 4, !tbaa !8
+ %tmp8 = fadd float %tmp4, %tmp7
+ %tmp9 = add i32 %tmp, 2
+ %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp9
+ %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
+ %tmp12 = fadd float %tmp8, %tmp11
+ %tmp13 = add i32 %tmp, 3
+ %tmp14 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp13
+ %tmp15 = load float, float addrspace(1)* %tmp14, align 4, !tbaa !8
+ %tmp16 = fadd float %tmp12, %tmp15
+ %tmp17 = getelementptr inbounds float, float addrspace(1)* %arg1, i32 %tmp
+ store float %tmp16, float addrspace(1)* %tmp17, align 4, !tbaa !8
+ ret void
+}
+
+
+; uniform load dominated by no-alias store - scalarize
+; CHECK-LABEL: @no_memdep_alias_arg
+; CHECK: flat_store_dword
+; CHECK: s_load_dword [[SVAL:s[0-9]+]]
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+
+define amdgpu_kernel void @no_memdep_alias_arg(i32 addrspace(1)* noalias %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
+ store i32 0, i32 addrspace(1)* %out0
+ %val = load i32, i32 addrspace(1)* %in
+ store i32 %val, i32 addrspace(1)* %out1
+ ret void
+}
+
+; uniform load dominated by alias store - vector
+; CHECK-LABEL: {{^}}memdep:
+; CHECK: flat_store_dword
+; CHECK: flat_load_dword [[VVAL:v[0-9]+]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+define amdgpu_kernel void @memdep(i32 addrspace(1)* %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
+ store i32 0, i32 addrspace(1)* %out0
+ %val = load i32, i32 addrspace(1)* %in
+ store i32 %val, i32 addrspace(1)* %out1
+ ret void
+}
+
+; uniform load from global array
+; CHECK-LABEL: @global_array
+; CHECK: s_load_dwordx2 [[A_ADDR:s\[[0-9]+:[0-9]+\]]]
+; CHECK: s_load_dwordx2 [[A_ADDR1:s\[[0-9]+:[0-9]+\]]], [[A_ADDR]], 0x0
+; CHECK: s_load_dword [[SVAL:s[0-9]+]], [[A_ADDR1]], 0x0
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+
+@A = common local_unnamed_addr addrspace(1) global i32 addrspace(1)* null, align 4
+
+define amdgpu_kernel void @global_array(i32 addrspace(1)* nocapture %out) {
+entry:
+ %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
+ %1 = load i32, i32 addrspace(1)* %0, align 4
+ store i32 %1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+
+; uniform load from global array dominated by alias store
+; CHECK-LABEL: @global_array_alias_store
+; CHECK: flat_store_dword
+; CHECK: v_mov_b32_e32 v[[ADDR_LO:[0-9]+]], s{{[0-9]+}}
+; CHECK: v_mov_b32_e32 v[[ADDR_HI:[0-9]+]], s{{[0-9]+}}
+; CHECK: flat_load_dwordx2 [[A_ADDR:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[ADDR_LO]]:[[ADDR_HI]]{{\]}}
+; CHECK: flat_load_dword [[VVAL:v[0-9]+]], [[A_ADDR]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+define amdgpu_kernel void @global_array_alias_store(i32 addrspace(1)* nocapture %out, i32 %n) {
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 %n
+ store i32 12, i32 addrspace(1)* %gep
+ %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
+ %1 = load i32, i32 addrspace(1)* %0, align 4
+ store i32 %1, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #1 = { nounwind readnone }
+
+!8 = !{!9, !9, i64 0}
+!9 = !{!"float", !10, i64 0}
+!10 = !{!"omnipotent char", !11, i64 0}
+!11 = !{!"Simple C/C++ TBAA"}
diff --git a/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll b/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll
new file mode 100644
index 00000000000..a6a04151caa
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll
@@ -0,0 +1,80 @@
+; RUN: llc -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK-LABEL: %bb11
+
+; The load from %arg in the loop body has an aliasing store
+
+; CHECK: flat_load_dword
+
+; CHECK-LABEL: %bb20
+; CHECK: flat_store_dword
+
+; #####################################################################
+
+; CHECK-LABEL: %bb22
+
+; The load from %arg has an aliasing store in the loop
+
+; CHECK: flat_load_dword
+
+; #####################################################################
+
+; The load from %arg1 has no aliasing store in the loop - arg1[i+1] never aliases arg1[i]
+
+; CHECK: s_load_dword
+
+define amdgpu_kernel void @cfg(i32 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) #0 {
+bb:
+ %tmp = sext i32 %arg2 to i64
+ %tmp3 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp
+ %tmp4 = load i32, i32 addrspace(1)* %tmp3, align 4, !tbaa !0
+ %tmp5 = icmp sgt i32 %tmp4, 0
+ br i1 %tmp5, label %bb6, label %bb8
+
+bb6: ; preds = %bb
+ br label %bb11
+
+bb7: ; preds = %bb22
+ br label %bb8
+
+bb8: ; preds = %bb7, %bb
+ %tmp9 = phi i32 [ 0, %bb ], [ %tmp30, %bb7 ]
+ %tmp10 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp
+ store i32 %tmp9, i32 addrspace(1)* %tmp10, align 4, !tbaa !0
+ ret void
+
+bb11: ; preds = %bb22, %bb6
+ %tmp12 = phi i32 [ %tmp30, %bb22 ], [ 0, %bb6 ]
+ %tmp13 = phi i32 [ %tmp25, %bb22 ], [ 0, %bb6 ]
+ %tmp14 = srem i32 %tmp13, %arg2
+ %tmp15 = sext i32 %tmp14 to i64
+ %tmp16 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp15
+ %tmp17 = load i32, i32 addrspace(1)* %tmp16, align 4, !tbaa !0
+ %tmp18 = icmp sgt i32 %tmp17, 100
+ %tmp19 = sext i32 %tmp13 to i64
+ br i1 %tmp18, label %bb20, label %bb22
+
+bb20: ; preds = %bb11
+ %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp19
+ store i32 0, i32 addrspace(1)* %tmp21, align 4, !tbaa !0
+ br label %bb22
+
+bb22: ; preds = %bb20, %bb11
+ %tmp23 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp19
+ %tmp24 = load i32, i32 addrspace(1)* %tmp23, align 4, !tbaa !0
+ %tmp25 = add nuw nsw i32 %tmp13, 1
+ %tmp26 = sext i32 %tmp25 to i64
+ %tmp27 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp26
+ %tmp28 = load i32, i32 addrspace(1)* %tmp27, align 4, !tbaa !0
+ %tmp29 = add i32 %tmp24, %tmp12
+ %tmp30 = add i32 %tmp29, %tmp28
+ %tmp31 = icmp eq i32 %tmp25, %tmp4
+ br i1 %tmp31, label %bb7, label %bb11
+}
+
+attributes #0 = { "target-cpu"="fiji" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}