author    | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-04-21 21:35:04 +0000
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2017-04-21 21:35:04 +0000
commit    | c07bda7b8788b80dff43ab2f5827f7e7224fc64b (patch)
tree      | 1281c93fde43a14e2e9e8673b26477359b647af4 /llvm/test/Transforms/InferAddressSpaces/AMDGPU
parent    | a3417bc4dd90c4279f9f2bcaebe7c0762e275045 (diff)
InferAddressSpaces: Infer for just GEPs
Fixes the pass leaving behind intermediate flat addressing computations
when a GEP instruction's source is a constant expression.
This still leaves behind a trivial addrspacecast + gep pair that
instcombine is able to handle; ideally it would be folded
directly here.
llvm-svn: 301044
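For illustration, a minimal before/after sketch of the new behavior, taken from the @constexpr_gep_gep_addrspacecast case in the test below (the values, indices, and names are those of the test, not a general recipe):

    ; before -infer-address-spaces: the GEP indexes a flat (addrspace(4))
    ; pointer built from a constant-expression addrspacecast of the LDS global
    %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0

    ; after: the GEP is rewritten to index the addrspace(3) global directly;
    ; only a trivial addrspacecast remains for the downstream flat-pointer user
    %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
    %1 = addrspacecast double addrspace(3)* %gep0 to double addrspace(4)*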
Diffstat (limited to 'llvm/test/Transforms/InferAddressSpaces/AMDGPU')
-rw-r--r-- | llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
new file mode 100644
index 00000000000..6b94a74da35
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
@@ -0,0 +1,48 @@
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Test that pure GetElementPtr instructions not directly connected to
+; a memory operation are inferred.
+
+@lds = internal unnamed_addr addrspace(3) global [648 x double] undef, align 8
+
+; CHECK-LABEL: @constexpr_gep_addrspacecast(
+; CHECK: %gep0 = getelementptr inbounds double, double addrspace(4)* addrspacecast (double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384) to double addrspace(4)*), i64 %idx0
+; CHECK-NEXT: %asc = addrspacecast double addrspace(4)* %gep0 to double addrspace(3)*
+; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %asc
+define void @constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
+  %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0
+  %asc = addrspacecast double addrspace(4)* %gep0 to double addrspace(3)*
+  store double 1.0, double addrspace(3)* %asc, align 8
+  ret void
+}
+
+; CHECK-LABEL: @constexpr_gep_gep_addrspacecast(
+; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
+; CHECK-NEXT: %1 = addrspacecast double addrspace(3)* %gep0 to double addrspace(4)*
+; CHECK-NEXT: %gep1 = getelementptr inbounds double, double addrspace(4)* %1, i64 %idx1
+; CHECK-NEXT: %asc = addrspacecast double addrspace(4)* %gep1 to double addrspace(3)*
+; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %asc, align 8
+define void @constexpr_gep_gep_addrspacecast(i64 %idx0, i64 %idx1) {
+  %gep0 = getelementptr inbounds double, double addrspace(4)* getelementptr ([648 x double], [648 x double] addrspace(4)* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double] addrspace(4)*), i64 0, i64 384), i64 %idx0
+  %gep1 = getelementptr inbounds double, double addrspace(4)* %gep0, i64 %idx1
+  %asc = addrspacecast double addrspace(4)* %gep1 to double addrspace(3)*
+  store double 1.0, double addrspace(3)* %asc, align 8
+  ret void
+}
+
+; Don't crash
+; CHECK-LABEL: @vector_gep(
+; CHECK: %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32] addrspace(4)*>
+define amdgpu_kernel void @vector_gep(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
+  %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32] addrspace(4)*>
+  %p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(4)*> %cast, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
+  %p0 = extractelement <4 x i32 addrspace(4)*> %p, i32 0
+  %p1 = extractelement <4 x i32 addrspace(4)*> %p, i32 1
+  %p2 = extractelement <4 x i32 addrspace(4)*> %p, i32 2
+  %p3 = extractelement <4 x i32 addrspace(4)*> %p, i32 3
+  store i32 99, i32 addrspace(4)* %p0
+  store i32 99, i32 addrspace(4)* %p1
+  store i32 99, i32 addrspace(4)* %p2
+  store i32 99, i32 addrspace(4)* %p3
+  ret void
+}
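As a usage sketch (the ./build directory is an assumption, not part of the commit), the new test can be run either through lit or by expanding its RUN line by hand:

    # run only this test via lit, assuming an LLVM build tree in ./build
    ./build/bin/llvm-lit llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll

    # or expand the RUN line manually (%s is the test file itself)
    ./build/bin/opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces \
        llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll \
      | ./build/bin/FileCheck llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll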