author    Matt Arsenault <Matthew.Arsenault@amd.com>  2016-06-15 00:11:01 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2016-06-15 00:11:01 +0000
commit    f42c69206d6bee89cbd3c5f8eb502131b234422c (patch)
tree      aeeb96eb571c0ed060f3f1c60f421f5f8f5ebb29
parent    5c063c108a80d98c64ca427353eac6c47f47c60a (diff)
AMDGPU: Run pointer optimization passes
llvm-svn: 272736
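
This wires the straight-line scalar optimizations (SeparateConstOffsetFromGEP,
SpeculativeExecution, StraightLineStrengthReduce, NaryReassociate, with
EarlyCSE/GVN cleanup between them) into the AMDGPU IR pipeline. A minimal
sketch of the IR shape these passes target; the global @lds and function
@sketch are hypothetical, not taken from this patch:

    @lds = internal unnamed_addr addrspace(3) global [64 x i32] undef, align 4

    define void @sketch(i32 addrspace(1)* %out, i32 %i) {
      ; Both addresses rebuild the same %i-based index behind different constants.
      %i1 = add i32 %i, 1
      %i2 = add i32 %i, 5
      %p1 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @lds, i32 0, i32 %i1
      %p2 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @lds, i32 0, i32 %i2
      %a = load i32, i32 addrspace(3)* %p1, align 4
      %b = load i32, i32 addrspace(3)* %p2, align 4
      %sum = add i32 %a, %b
      store i32 %sum, i32 addrspace(1)* %out, align 4
      ret void
    }

SeparateConstOffsetFromGEP splits the +1/+5 constants out of the GEPs,
EarlyCSE/GVN then share the remaining %i-based address, and instruction
selection can absorb the constant byte offsets into DS offset fields (e.g. a
single ds_read2_b32 with offset0:1 offset1:5 on CI) -- the same pattern the
updated local-memory-two-objects.ll below now checks for.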
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp                                53
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll                          37
-rw-r--r--  llvm/test/CodeGen/AMDGPU/min.ll                                                2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/predicates.ll                                        20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/setcc-opt.ll                                         89
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uniform-cfg.ll                                       11
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll    3
7 files changed, 131 insertions, 84 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 96ddd81abc5..3bccf163608 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -36,7 +36,8 @@
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
-#include <llvm/CodeGen/Passes.h>
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/CodeGen/Passes.h"
using namespace llvm;
@@ -180,8 +181,9 @@ public:
return nullptr;
}
+ void addEarlyCSEOrGVNPass();
+ void addStraightLineScalarOptimizationPasses();
void addIRPasses() override;
- void addCodeGenPrepare() override;
bool addPreISel() override;
bool addInstSelector() override;
bool addGCPasses() override;
@@ -225,6 +227,29 @@ TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
});
}
+void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
+ if (getOptLevel() == CodeGenOpt::Aggressive)
+ addPass(createGVNPass());
+ else
+ addPass(createEarlyCSEPass());
+}
+
+void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
+ addPass(createSeparateConstOffsetFromGEPPass());
+ addPass(createSpeculativeExecutionPass());
+ // ReassociateGEPs exposes more opportunities for SLSR. See
+ // the example in reassociate-geps-and-slsr.ll.
+ addPass(createStraightLineStrengthReducePass());
+ // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
+ // EarlyCSE can reuse.
+ addEarlyCSEOrGVNPass();
+ // Run NaryReassociate after EarlyCSE/GVN to be more effective.
+ addPass(createNaryReassociatePass());
+ // NaryReassociate on GEPs creates redundant common expressions, so run
+ // EarlyCSE after it.
+ addPass(createEarlyCSEPass());
+}
+
void AMDGPUPassConfig::addIRPasses() {
// There is no reason to run these.
disablePass(&StackMapLivenessID);
@@ -244,17 +269,31 @@ void AMDGPUPassConfig::addIRPasses() {
// Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
addPass(createAMDGPUOpenCLImageTypeLoweringPass());
- TargetPassConfig::addIRPasses();
-}
-
-void AMDGPUPassConfig::addCodeGenPrepare() {
const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
addPass(createAMDGPUPromoteAlloca(&TM));
addPass(createSROAPass());
}
- TargetPassConfig::addCodeGenPrepare();
+
+ addStraightLineScalarOptimizationPasses();
+
+ TargetPassConfig::addIRPasses();
+
+ // EarlyCSE is not always strong enough to clean up what LSR produces. For
+ // example, GVN can combine
+ //
+ // %0 = add %a, %b
+ // %1 = add %b, %a
+ //
+ // and
+ //
+ // %0 = shl nsw %a, 2
+ // %1 = shl %a, 2
+ //
+ // but EarlyCSE can do neither of them.
+ if (getOptLevel() != CodeGenOpt::None)
+ addEarlyCSEOrGVNPass();
}
bool
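
The comment in addIRPasses() above motivates the final addEarlyCSEOrGVNPass()
call with pairs that GVN merges but EarlyCSE misses. A tiny illustration of
the commuted-add case; the function @commuted is hypothetical, not part of
the patch:

    define void @commuted(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1,
                          i32 %a, i32 %b) {
      %x = add i32 %a, %b
      %y = add i32 %b, %a    ; commuted duplicate of %x
      store i32 %x, i32 addrspace(1)* %out0, align 4
      store i32 %y, i32 addrspace(1)* %out1, align 4
      ret void
    }

Run through opt -gvn, %y is value-numbered equal to %x and removed; per the
comment above, EarlyCSE does not catch the commuted pair, which is why
addEarlyCSEOrGVNPass() prefers GVN at CodeGenOpt::Aggressive.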
diff --git a/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll b/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
index 969769bea6f..cec334f7df6 100644
--- a/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI %s
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@@ -12,15 +12,14 @@
; GCN: .long 47180
; GCN-NEXT: .long 32900
-; EG: {{^}}local_memory_two_objects:
+
+; FUNC-LABEL: {{^}}local_memory_two_objects:
; We would like to check the lds writes are using different
; addresses, but due to variations in the scheduler, we can't do
; this consistently on evergreen GPUs.
; EG: LDS_WRITE
; EG: LDS_WRITE
-; GCN: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; GCN-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
; GROUP_BARRIER must be the last instruction in a clause
; EG: GROUP_BARRIER
@@ -30,9 +29,29 @@
; constant offsets.
; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI: v_add_i32_e32 [[SIPTR:v[0-9]+]], vcc, 16, v{{[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]]
-; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:4
+
+
+; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}
+
+
+; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
+
+; SI-DAG: ds_write_b32 [[ADDRW]],
+; SI-DAG: ds_write_b32 [[ADDRW_OFF]],
+
+; GCN: s_barrier
+
+; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
+; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]
+
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]
+
+; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
+; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7
+
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
%x.i = call i32 @llvm.r600.read.tidig.x() #0
diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index f9355c5741c..5d64a152af3 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -223,7 +223,7 @@ define void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; EG: MIN_UINT
define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%a = load i32, i32 addrspace(1)* %aptr, align 4
- %b = load i32, i32 addrspace(1)* %aptr, align 4
+ %b = load i32, i32 addrspace(1)* %bptr, align 4
%cmp = icmp ult i32 %a, %b
%val = select i1 %cmp, i32 %a, i32 %b
store i32 %val, i32 addrspace(1)* %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/predicates.ll b/llvm/test/CodeGen/AMDGPU/predicates.ll
index 0ce74d97ba8..79dee61cc7c 100644
--- a/llvm/test/CodeGen/AMDGPU/predicates.ll
+++ b/llvm/test/CodeGen/AMDGPU/predicates.ll
@@ -1,27 +1,27 @@
-; RUN: llc < %s -march=r600 -mattr=disable-irstructurizer -mcpu=redwood | FileCheck %s
+; RUN: llc -spec-exec-max-speculation-cost=0 -march=r600 -mattr=disable-irstructurizer -mcpu=redwood < %s | FileCheck %s
; These tests make sure the compiler is optimizing branches using predicates
; when it is legal to do so.
-; CHECK: {{^}}simple_if:
+; CHECK-LABEL: {{^}}simple_if:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
entry:
- %0 = icmp sgt i32 %in, 0
- br i1 %0, label %IF, label %ENDIF
+ %cmp0 = icmp sgt i32 %in, 0
+ br i1 %cmp0, label %IF, label %ENDIF
IF:
- %1 = shl i32 %in, 1
+ %tmp1 = shl i32 %in, 1
br label %ENDIF
ENDIF:
- %2 = phi i32 [ %in, %entry ], [ %1, %IF ]
- store i32 %2, i32 addrspace(1)* %out
+ %tmp2 = phi i32 [ %in, %entry ], [ %tmp1, %IF ]
+ store i32 %tmp2, i32 addrspace(1)* %out
ret void
}
-; CHECK: {{^}}simple_if_else:
+; CHECK-LABEL: {{^}}simple_if_else:
; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@ ENDIF:
ret void
}
-; CHECK: {{^}}nested_if:
+; CHECK-LABEL: {{^}}nested_if:
; CHECK: ALU_PUSH_BEFORE
; CHECK: JUMP
; CHECK: POP
@@ -71,7 +71,7 @@ ENDIF:
ret void
}
-; CHECK: {{^}}nested_if_else:
+; CHECK-LABEL: {{^}}nested_if_else:
; CHECK: ALU_PUSH_BEFORE
; CHECK: JUMP
; CHECK: POP
diff --git a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
index 405640cf88e..d2c57a810c2 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
@@ -36,38 +36,6 @@ define void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
ret void
}
-; This really folds away to false
-; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
-; GCN: v_cmp_eq_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
- %icmp0 = icmp eq i32 %a, %b
- %ext = sext i1 %icmp0 to i32
- %icmp1 = icmp eq i32 %ext, 1
- store i1 %icmp1, i1 addrspace(1)* %out
- ret void
-}
-
-; This really folds away to true
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
-; GCN: v_cmp_ne_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
- %icmp0 = icmp ne i32 %a, %b
- %ext = sext i1 %icmp0 to i32
- %icmp1 = icmp ne i32 %ext, 1
- store i1 %icmp1, i1 addrspace(1)* %out
- ret void
-}
-
; FUNC-LABEL: {{^}}sext_bool_icmp_eq_neg1:
; GCN-NOT: v_cmp
; GCN: v_cmp_eq_i32_e32 vcc,
@@ -177,24 +145,6 @@ define void @zext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounw
ret void
}
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
-; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
-; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}}
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
-; GCN: buffer_store_byte
-; GCN: s_endpgm
-define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
- %icmp0 = icmp ne i32 %a, %b
- %ext = sext i1 %icmp0 to i32
- %icmp1 = icmp ne i32 %ext, 2
- store i1 %icmp1, i1 addrspace(1)* %out
- ret void
-}
-
; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
; SI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; VI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
@@ -294,3 +244,40 @@ define void @zext_bool_icmp_eq_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind
store i1 %icmp1, i1 addrspace(1)* %out
ret void
}
+
+; FIXME: These cases should really be able to fold to true/false in
+; DAGCombiner
+
+; This really folds away to false
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp eq i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp eq i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 1
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+ %icmp0 = icmp ne i32 %a, %b
+ %ext = sext i1 %icmp0 to i32
+ %icmp1 = icmp ne i32 %ext, 2
+ store i1 %icmp1, i1 addrspace(1)* %out
+ ret void
+}
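
The sext_bool_icmp tests relocated above now check a stored constant rather
than a compare chain. The reasoning behind the FIXME is that a sign-extended
i1 can only take the values 0 and -1; a reduced sketch (not patch content):

    define void @sext_fold_sketch(i1 addrspace(1)* %out, i32 %a, i32 %b) {
      %icmp0 = icmp eq i32 %a, %b
      %ext = sext i1 %icmp0 to i32   ; %ext is 0 or -1, never 1 or 2
      %icmp1 = icmp eq i32 %ext, 1   ; therefore always false
      store i1 %icmp1, i1 addrspace(1)* %out
      ret void
    }

Symmetrically, icmp ne i32 %ext, 1 (or against any constant outside {0, -1})
is always true, matching the v_mov_b32_e32 [[K]], 0 / 1 constants the GCN
checks expect.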
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index dfc82f820cc..ac9e2b5f843 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -259,16 +259,17 @@ ENDIF: ; preds = %IF, %main_body
; SI: buffer_store
; SI: {{^}}[[EXIT]]:
; SI: s_endpgm
-define void @icmp_users_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
+define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
- %tmp1 = icmp sgt i32 %cond, 0
- br i1 %tmp1, label %bb2, label %bb9
+ %cmp0 = icmp sgt i32 %cond0, 0
+ %cmp1 = icmp sgt i32 %cond1, 0
+ br i1 %cmp0, label %bb2, label %bb9
bb2: ; preds = %bb
- %tmp2 = sext i1 %tmp1 to i32
+ %tmp2 = sext i1 %cmp1 to i32
%tmp3 = add i32 %tmp2, %tmp
- br i1 %tmp1, label %bb9, label %bb7
+ br i1 %cmp1, label %bb9, label %bb7
bb7: ; preds = %bb5
store i32 %tmp3, i32 addrspace(1)* %out
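
A plausible motivation for splitting %cond here (the patch itself does not
state one): the old test branched twice on the same i1, and the newly added
EarlyCSE/GVN cleanup could fold the second, fully determined branch,
collapsing the CFG shape under test. Reduced from the removed lines above:

    %tmp1 = icmp sgt i32 %cond, 0
    br i1 %tmp1, label %bb2, label %bb9
    bb2:
      br i1 %tmp1, label %bb9, label %bb7   ; %tmp1 is known true in %bb2

Using two independent conditions (%cond0, %cond1) keeps both branches live.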
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
index 349baaf82f1..8c83df5843d 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -76,7 +76,8 @@ bb13:
; CHECK: br i1
; CHECK: bb:
-; CHECK: getelementptr i8, i8 addrspace(1)* %t, i32 %lsr.iv
+; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
+; CHECK: getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
define void @global_gep_user(i32 %arg0) nounwind {
entry:
br label %bb11