path: root/llvm/test/CodeGen/AMDGPU/load-constant-f32.ll
author    Farhana Aleen <farhana.aleen@gmail.com>    2018-03-07 17:09:18 +0000
committer Farhana Aleen <farhana.aleen@gmail.com>    2018-03-07 17:09:18 +0000
commit    89196642f72cc3325963918c0426ce128c414104 (patch)
tree      1c0fb19b4dcb115932854c6cfc86768b38a5e136 /llvm/test/CodeGen/AMDGPU/load-constant-f32.ll
parent    c0e768df9050133466695f3a8f3bf0b3bb343987 (diff)
[AMDGPU] Increased vector length for global/constant loads.
Summary: The GCN ISA provides instructions that can read 16 consecutive dwords from memory through the scalar data cache; the LoadStoreVectorizer should take advantage of this wider vector length and pack 16/8 elements of dwords/quadwords.

Author: Farhana Aleen

Reviewed By: rampitec

Subscribers: llvm-commits, AMDGPU

Differential Revision: https://reviews.llvm.org/D44179

llvm-svn: 326910
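For illustration only, below is a minimal sketch of the IR shape the LoadStoreVectorizer is expected to leave behind for the 8-dword case exercised by the new test: the eight scalar loads from the constant address space (addrspace(4)) collapse into a single <8 x float> load, which the GCN backend can then select as s_load_dwordx8. The function name and the exact bitcast/alignment shape here are assumptions, not taken from the patch.

; Hypothetical post-vectorization form (sketch, not from the patch):
; eight dword loads merged into one <8 x float> load.
define amdgpu_kernel void @constant_load_v8f32_vectorized(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
entry:
  %acc = load float, float addrspace(1)* %out_ptr, align 4
  ; The vectorizer rewrites the pointer and issues one wide load.
  %vptr = bitcast float addrspace(4)* %weights to <8 x float> addrspace(4)*
  %vec = load <8 x float>, <8 x float> addrspace(4)* %vptr, align 4
  ; The fadd chain stays scalar; only the loads are packed.
  %e0 = extractelement <8 x float> %vec, i32 0
  %s0 = fadd float %e0, %acc
  %e1 = extractelement <8 x float> %vec, i32 1
  %s1 = fadd float %e1, %s0
  %e2 = extractelement <8 x float> %vec, i32 2
  %s2 = fadd float %e2, %s1
  %e3 = extractelement <8 x float> %vec, i32 3
  %s3 = fadd float %e3, %s2
  %e4 = extractelement <8 x float> %vec, i32 4
  %s4 = fadd float %e4, %s3
  %e5 = extractelement <8 x float> %vec, i32 5
  %s5 = fadd float %e5, %s4
  %e6 = extractelement <8 x float> %vec, i32 6
  %s6 = fadd float %e6, %s5
  %e7 = extractelement <8 x float> %vec, i32 7
  %s7 = fadd float %e7, %s6
  store float %s7, float addrspace(1)* %out_ptr, align 4
  ret void
}

Compiled with llc -march=amdgcn, this wide load is the form the GCN check line in the test targets (s_load_dwordx8); the 16-dword case mentioned in the summary would correspond to s_load_dwordx16.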
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/load-constant-f32.ll')
-rw-r--r-- llvm/test/CodeGen/AMDGPU/load-constant-f32.ll | 37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-f32.ll b/llvm/test/CodeGen/AMDGPU/load-constant-f32.ll
new file mode 100644
index 00000000000..4a07038fc90
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-f32.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; Tests whether a load chain of 8 constants gets vectorized into a wider load.
+; FUNC-LABEL: {{^}}constant_load_v8f32:
+; GCN: s_load_dwordx8
+; EG: VTX_READ_128
+; EG: VTX_READ_128
+define amdgpu_kernel void @constant_load_v8f32(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
+entry:
+ %out_ptr.promoted = load float, float addrspace(1)* %out_ptr, align 4
+ %tmp = load float, float addrspace(4)* %weights, align 4
+ %add = fadd float %tmp, %out_ptr.promoted
+ %arrayidx.1 = getelementptr inbounds float, float addrspace(4)* %weights, i64 1
+ %tmp1 = load float, float addrspace(4)* %arrayidx.1, align 4
+ %add.1 = fadd float %tmp1, %add
+ %arrayidx.2 = getelementptr inbounds float, float addrspace(4)* %weights, i64 2
+ %tmp2 = load float, float addrspace(4)* %arrayidx.2, align 4
+ %add.2 = fadd float %tmp2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float addrspace(4)* %weights, i64 3
+ %tmp3 = load float, float addrspace(4)* %arrayidx.3, align 4
+ %add.3 = fadd float %tmp3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float addrspace(4)* %weights, i64 4
+ %tmp4 = load float, float addrspace(4)* %arrayidx.4, align 4
+ %add.4 = fadd float %tmp4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float addrspace(4)* %weights, i64 5
+ %tmp5 = load float, float addrspace(4)* %arrayidx.5, align 4
+ %add.5 = fadd float %tmp5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float addrspace(4)* %weights, i64 6
+ %tmp6 = load float, float addrspace(4)* %arrayidx.6, align 4
+ %add.6 = fadd float %tmp6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float addrspace(4)* %weights, i64 7
+ %tmp7 = load float, float addrspace(4)* %arrayidx.7, align 4
+ %add.7 = fadd float %tmp7, %add.6
+ store float %add.7, float addrspace(1)* %out_ptr, align 4
+ ret void
+} \ No newline at end of file