author    Eli Bendersky <eliben@google.com>    2014-05-01 18:38:36 +0000
committer Eli Bendersky <eliben@google.com>    2014-05-01 18:38:36 +0000
commit    a108a65df2716de0c12d39f189c300a17a7bde8b (patch)
tree      eeb3a1831b939c633e5df5aa4a060bc36c5d0382 /llvm/test
parent    748be6c3760e47ff9a2c9d88a48f18d92309727b (diff)
Add an optimization that does CSE in a group of similar GEPs.
This optimization merges the common part of a group of GEPs, so we can compute each pointer address by adding a simple offset to the common part. The optimization is currently only enabled for the NVPTX backend, where it has a large payoff on some benchmarks.

Review: http://reviews.llvm.org/D3462

Patch by Jingyue Wu.

llvm-svn: 207783
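To make the payoff concrete, here is a minimal C sketch of the rewrite this pass (together with GVN) enables. The function names are hypothetical illustrations; the pass itself operates on LLVM IR GEPs, not C source, as the sum_of_array test below shows:

extern float array[32][32];

/* Before the pass: each of the four accesses re-derives its address
 * from x and y, so the backend emits four full address computations. */
float sum_before(int x, int y) {
    return array[x][y] + array[x][y + 1] +
           array[x + 1][y] + array[x + 1][y + 1];
}

/* After SeparateConstOffsetFromGEP plus GVN: the common base
 * &array[x][y] is computed once, and the remaining addresses are the
 * base plus constant byte offsets (a row of 32 floats is 128 bytes). */
float sum_after(int x, int y) {
    const float *base = &array[x][y];
    return base[0] + base[1] + base[32] + base[33];
}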
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg        |   4
-rw-r--r--  llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll |  60
-rw-r--r--  llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll         | 101
3 files changed, 165 insertions, 0 deletions
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
new file mode 100644
index 00000000000..40532cdaa20
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
@@ -0,0 +1,4 @@
+targets = set(config.root.targets_to_build.split())
+if 'NVPTX' not in targets:
+ config.unsupported = True
+
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
new file mode 100644
index 00000000000..66f4096fa96
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -0,0 +1,60 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
+
+; Verifies the SeparateConstOffsetFromGEP pass.
+; The following code computes
+; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
+;
+; We expect SeparateConstOffsetFromGEP to transform it to
+;
+; float *base = &array[x][y];
+; *output = base[0] + base[1] + base[32] + base[33];
+;
+; so the backend can emit PTX that uses fewer virtual registers.
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4
+
+define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
+.preheader:
+ %0 = zext i32 %y to i64
+ %1 = zext i32 %x to i64
+ %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %3 = addrspacecast float addrspace(3)* %2 to float*
+ %4 = load float* %3, align 4
+ %5 = fadd float %4, 0.000000e+00
+ %6 = add i32 %y, 1
+ %7 = zext i32 %6 to i64
+ %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %9 = addrspacecast float addrspace(3)* %8 to float*
+ %10 = load float* %9, align 4
+ %11 = fadd float %5, %10
+ %12 = add i32 %x, 1
+ %13 = zext i32 %12 to i64
+ %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %15 = addrspacecast float addrspace(3)* %14 to float*
+ %16 = load float* %15, align 4
+ %17 = fadd float %11, %16
+ %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %19 = addrspacecast float addrspace(3)* %18 to float*
+ %20 = load float* %19, align 4
+ %21 = fadd float %17, %20
+ store float %21, float* %output, align 4
+ ret void
+}
+
+; PTX-LABEL: sum_of_array(
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
+
+; IR-LABEL: @sum_of_array(
+; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y
+; IR: [[BASE_INT:%[0-9]+]] = ptrtoint float addrspace(3)* [[BASE_PTR]] to i64
+; IR: %5 = add i64 [[BASE_INT]], 4
+; IR: %10 = add i64 [[BASE_INT]], 128
+; IR: %15 = add i64 [[BASE_INT]], 132
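For reference, the constant offsets in the checks above fall out of the array layout. A small self-checking C sketch of that arithmetic (the assertions merely restate the checked values; nothing here is part of the test itself):

#include <assert.h>

/* For float array[32][32], one row is 32 * sizeof(float) = 128 bytes,
 * which yields the +4, +128, and +132 offsets checked above. */
int main(void) {
    const unsigned long row = 32 * sizeof(float);
    assert(sizeof(float) == 4);          /* array[x][y+1]   -> base + 4   */
    assert(row == 128);                  /* array[x+1][y]   -> base + 128 */
    assert(row + sizeof(float) == 132);  /* array[x+1][y+1] -> base + 132 */
    return 0;
}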
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
new file mode 100644
index 00000000000..f4020019c9a
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
@@ -0,0 +1,101 @@
+; RUN: opt < %s -separate-const-offset-from-gep -dce -S | FileCheck %s
+
+; Several unit tests for -separate-const-offset-from-gep. The transformation
+; heavily relies on TargetTransformInfo, so we put these tests under
+; target-specific folders.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+; target triple is necessary; otherwise TargetTransformInfo rejects any
+; addressing mode.
+target triple = "nvptx64-unknown-unknown"
+
+%struct.S = type { float, double }
+
+@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
+@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4
+
+; We should not extract any struct field indices, because fields in a struct
+; may have different types.
+define double* @struct(i32 %i) {
+entry:
+ %add = add nsw i32 %i, 5
+ %idxprom = sext i32 %add to i64
+ %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ ret double* %p
+}
+; CHECK-LABEL: @struct
+; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1
+
+; We should be able to trace into sext/zext if it's directly used as a GEP
+; index.
+define float* @sext_zext(i32 %i, i32 %j) {
+entry:
+ %i1 = add i32 %i, 1
+ %j2 = add i32 %j, 2
+ %i1.ext = sext i32 %i1 to i64
+ %j2.ext = zext i32 %j2 to i64
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
+ ret float* %p
+}
+; CHECK-LABEL: @sext_zext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
+; CHECK: add i64 %{{[0-9]+}}, 136
+
+; We should be able to trace into sext/zext if it can be distributed to both
+; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
+define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
+ %b1 = add nsw i32 %b, 1
+ %b2 = sext i32 %b1 to i64
+ %i = add i64 %a, %b2
+ %d1 = add nuw i32 %d, 1
+ %d2 = zext i32 %d1 to i64
+ %j = add i64 %c, %d2
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ ret float* %p
+}
+; CHECK-LABEL: @ext_add_no_overflow
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK: [[BASE_INT:%[0-9]+]] = ptrtoint float* [[BASE_PTR]] to i64
+; CHECK: add i64 [[BASE_INT]], 132
+
+; We should treat "or" with no common bits (%k) as "add", and leave "or" with
+; potentially common bits (%l) as is.
+define float* @or(i64 %i) {
+entry:
+ %j = shl i64 %i, 2
+ %k = or i64 %j, 3 ; no common bits
+ %l = or i64 %j, 4 ; potentially common bits
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
+ ret float* %p
+}
+; CHECK-LABEL: @or
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
+; CHECK: add i64 %{{[0-9]+}}, 384
+
+; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
+; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
+; affected.
+define float* @expr(i64 %a, i64 %b, i64* %out) {
+entry:
+ %b5 = add i64 %b, 5
+ %i = add i64 %b5, %a
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+ store i64 %b5, i64* %out
+ ret float* %p
+}
+; CHECK-LABEL: @expr
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
+; CHECK: add i64 %{{[0-9]+}}, 640
+; CHECK: store i64 %b5, i64* %out
+
+; Verifies we handle "sub" correctly.
+define float* @sub(i64 %i, i64 %j) {
+ %i2 = sub i64 %i, 5 ; i - 5
+  %j2 = sub i64 5, %j                              ; 5 - j
+ %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
+ ret float* %p
+}
+; CHECK-LABEL: @sub
+; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: add i64 %{{[0-9]+}}, -620
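
The -620 in the final check can be derived by hand: the element at (i-5, 5-j) of a [32 x [32 x float]] sits at byte offset (i-5)*128 + (5-j)*4 = (128*i - 4*j) - 620. A minimal self-checking C sketch (the sample values are arbitrary assumptions):

#include <assert.h>

/* Byte offset of element (i-5, 5-j) in float[32][32]: the variadic
 * part is gep(i, -j); the extracted constant part is -640 + 20 = -620. */
static long byte_offset(long i, long j) {
    return (i - 5) * 128 + (5 - j) * 4;
}

int main(void) {
    long i = 7, j = 3; /* arbitrary sample values */
    assert(byte_offset(i, j) == 128 * i - 4 * j - 620);
    return 0;
}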